diff --git a/s3/__init__.py b/s3/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/s3/test_utils/__init__.py b/s3/test_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/s3/test_utils/config_provider.py b/s3/test_utils/config_provider.py new file mode 100644 index 000000000..76184d4e4 --- /dev/null +++ b/s3/test_utils/config_provider.py @@ -0,0 +1,8 @@ +import configparser + + +def read_config(s3_config_file): + config = configparser.ConfigParser() + config.read_string('[fake-section]\n' + open(s3_config_file).read()) + + return config['fake-section'] diff --git a/s3/test_utils/s3_backup.py b/s3/test_utils/s3_backup.py new file mode 100644 index 000000000..81d58d095 --- /dev/null +++ b/s3/test_utils/s3_backup.py @@ -0,0 +1,208 @@ +import os +import io +import sys + +import minio +from minio import Minio +from minio.deleteobjects import DeleteObject +import urllib3 +from pg_probackup2.storage.fs_backup import TestBackupDir +from pg_probackup2.init_helpers import init_params +from s3.test_utils import config_provider + +root = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..')) +if root not in sys.path: + sys.path.append(root) + +status_forcelist = [413, # RequestBodyTooLarge + 429, # TooManyRequests + 500, # InternalError + 503, # ServerBusy + ] + +DEFAULT_CONF_FILE = 's3/tests/s3.conf' + + +class S3TestBackupDir(TestBackupDir): + is_file_based = False + + def __init__(self, *, rel_path, backup): + self.access_key = None + self.secret_key = None + self.s3_type = None + self.tmp_path = None + self.host = None + self.port = None + self.bucket_name = None + self.region = None + self.bucket = None + self.path_suffix = None + self.https = None + self.s3_config_file = None + self.ca_certificate = None + + self.set_s3_config_file() + self.setup_s3_env() + + path = "pg_probackup" + if self.path_suffix: + path += "_" + self.path_suffix + if self.tmp_path == '' or os.path.isabs(self.tmp_path): + self.path = f"{path}{self.tmp_path}/{rel_path}/{backup}" + else: + self.path = f"{path}/{self.tmp_path}/{rel_path}/{backup}" + + secure: bool = False + self.versioning: bool = False + if self.https in ['ON', 'HTTPS']: + secure = True + if self.https and self.ca_certificate: + http_client = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', + ca_certs=self.ca_certificate, + retries=urllib3.Retry(total=5, + backoff_factor=1, + status_forcelist=status_forcelist)) + else: + http_client = urllib3.PoolManager(retries=urllib3.Retry(total=5, + backoff_factor=1, + status_forcelist=status_forcelist)) + + self.conn = Minio(self.host + ":" + self.port, secure=secure, access_key=self.access_key, + secret_key=self.secret_key, http_client=http_client) + if not self.conn.bucket_exists(self.bucket): + raise Exception(f"Test bucket {self.bucket} does not exist.") + + try: + config = self.conn.get_bucket_versioning(self.bucket) + if config.status.lower() == "enabled" or config.status.lower() == "suspended": + self.versioning = True + else: + self.versioning = False + except Exception as e: + if "NotImplemented" in repr(e): + self.versioning = False + else: + raise e + self.pb_args = ('-B', '/' + self.path, f'--s3={init_params.s3_type}') + if self.s3_config_file: + self.pb_args += (f'--s3-config-file={self.s3_config_file}',) + return + + def setup_s3_env(self, s3_config=None): + self.tmp_path = os.environ.get('PGPROBACKUP_TMP_DIR', default='') + self.host = os.environ.get('PG_PROBACKUP_S3_HOST', default='') + + # If environment variables are not setup, use from config 
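+ # Precedence: when an S3 config file is set (via PG_PROBACKUP_S3_CONFIG_FILE
+ # or an explicit s3_config argument), credentials and the endpoint are read
+ # from that file and init_params.s3_type is forced to 'minio'; otherwise the
+ # values are taken from the PG_PROBACKUP_S3_* environment variables below.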
+ if self.s3_config_file or s3_config: + minio_config = config_provider.read_config(self.s3_config_file or s3_config) + self.access_key = minio_config['access-key'] + self.secret_key = minio_config['secret-key'] + self.host = minio_config['s3-host'] + self.port = minio_config['s3-port'] + self.bucket = minio_config['s3-bucket'] + self.region = minio_config['s3-region'] + self.https = minio_config['s3-secure'] + init_params.s3_type = 'minio' + else: + self.access_key = os.environ.get('PG_PROBACKUP_S3_ACCESS_KEY') + self.secret_key = os.environ.get('PG_PROBACKUP_S3_SECRET_ACCESS_KEY') + self.host = os.environ.get('PG_PROBACKUP_S3_HOST') + self.port = os.environ.get('PG_PROBACKUP_S3_PORT') + self.bucket = os.environ.get('PG_PROBACKUP_S3_BUCKET_NAME') + self.region = os.environ.get('PG_PROBACKUP_S3_REGION') + self.https = os.environ.get('PG_PROBACKUP_S3_HTTPS') + self.ca_certificate = os.environ.get('PG_PROBACKUP_S3_CA_CERTIFICATE') + init_params.s3_type = os.environ.get('PG_PROBACKUP_S3_TEST') + + # multi-url case + # remove all urls from string except the first one + if ';' in self.host: + self.host = self.host[:self.host.find(';')] + if ':' in self.host: # also change port if it was overridden in multihost string + self.port = self.host[self.host.find(':') + 1:] + self.host = self.host[:self.host.find(':')] + + def set_s3_config_file(self): + s3_config = os.environ.get('PG_PROBACKUP_S3_CONFIG_FILE') + if s3_config is not None and s3_config.strip().lower() == "true": + self.s3_config_file = DEFAULT_CONF_FILE + else: + self.s3_config_file = s3_config + + def list_instance_backups(self, instance): + full_path = os.path.join(self.path, 'backups', instance) + candidates = self.conn.list_objects(self.bucket, prefix=full_path, recursive=True) + return [os.path.basename(os.path.dirname(x.object_name)) + for x in candidates if x.object_name.endswith('backup.control')] + + def list_files(self, sub_dir, recursive=False): + full_path = os.path.join(self.path, sub_dir) + # Need '/' in the end to find inside the folder + full_path_dir = full_path if full_path[-1] == '/' else full_path + '/' + object_list = self.conn.list_objects(self.bucket, prefix=full_path_dir, recursive=recursive) + return [obj.object_name.replace(full_path_dir, '', 1) + for obj in object_list + if not obj.is_dir] + + def list_dirs(self, sub_dir): + full_path = os.path.join(self.path, sub_dir) + # Need '/' in the end to find inside the folder + full_path_dir = full_path if full_path[-1] == '/' else full_path + '/' + object_list = self.conn.list_objects(self.bucket, prefix=full_path_dir, recursive=False) + return [obj.object_name.replace(full_path_dir, '', 1).rstrip('\\/') + for obj in object_list + if obj.is_dir] + + def read_file(self, sub_path, *, text=True): + full_path = os.path.join(self.path, sub_path) + bytes = self.conn.get_object(self.bucket, full_path).read() + if not text: + return bytes + return bytes.decode('utf-8') + + def write_file(self, sub_path, data, *, text=True): + full_path = os.path.join(self.path, sub_path) + if text: + data = data.encode('utf-8') + self.conn.put_object(self.bucket, full_path, io.BytesIO(data), length=len(data)) + + def cleanup(self, dir=''): + self.remove_dir(dir) + + def remove_file(self, sub_path): + full_path = os.path.join(self.path, sub_path) + self.conn.remove_object(self.bucket, full_path) + + def remove_dir(self, sub_path): + if sub_path: + full_path = os.path.join(self.path, sub_path) + else: + full_path = self.path + objs = self.conn.list_objects(self.bucket, prefix=full_path, 
recursive=True, + include_version=self.versioning) + delobjs = (DeleteObject(o.object_name, o.version_id) for o in objs) + errs = list(self.conn.remove_objects(self.bucket, delobjs)) + if errs: + strerrs = "; ".join(str(err) for err in errs) + raise Exception("There were errors: {0}".format(strerrs)) + + def exists(self, sub_path): + full_path = os.path.join(self.path, sub_path) + try: + self.conn.stat_object(self.bucket, full_path) + return True + except minio.error.S3Error as s3err: + if s3err.code == 'NoSuchKey': + return False + raise s3err + except Exception as err: + raise err + + def __str__(self): + return '/' + self.path + + def __repr__(self): + return "S3TestBackupDir" + str(self.path) + + def __fspath__(self): + return self.path diff --git a/s3/tests/__init__.py b/s3/tests/__init__.py new file mode 100644 index 000000000..d2982d81a --- /dev/null +++ b/s3/tests/__init__.py @@ -0,0 +1,17 @@ +import unittest +import os + +from . import auth_test, param_test + + +def load_tests(loader, tests, pattern): + suite = unittest.TestSuite() + + if 'PG_PROBACKUP_TEST_BASIC' in os.environ: + if os.environ['PG_PROBACKUP_TEST_BASIC'] == 'ON': + loader.testMethodPrefix = 'test_basic' + + suite.addTests(loader.loadTestsFromModule(auth_test)) + suite.addTests(loader.loadTestsFromModule(param_test)) + + return suite diff --git a/s3/tests/auth_test.py b/s3/tests/auth_test.py new file mode 100644 index 000000000..df3cfe5a8 --- /dev/null +++ b/s3/tests/auth_test.py @@ -0,0 +1,36 @@ +import os +import sys + +root = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..')) +if root not in sys.path: + sys.path.append(root) + +from tests.helpers.ptrack_helpers import ProbackupTest + + +class AuthorizationTest(ProbackupTest): + """ + Check connect to S3 via pre_start_checks() function + calling pg_probackup init --s3 + + test that s3 keys allow to connect to all types of storages + """ + + def s3_auth_test(self): + console_output = self.pb.init(options=["--log-level-console=VERBOSE"]) + + self.assertNotIn(': 403', console_output) # Because we can have just '403' substring in timestamp + self.assertMessage(console_output, contains='S3_pre_start_check successful') + self.assertMessage(console_output, contains='HTTP response: 200') + self.assertIn( + f"INFO: Backup catalog '{self.backup_dir}' successfully initialized", + console_output) + + def test_log_level_file_requires_log_directory(self): + console_output = self.pb.init(options=["--log-level-file=VERBOSE"], + skip_log_directory=True, + expect_error=True) + + self.assertMessage(console_output, + contains='ERROR: Cannot save S3 logs to a file. You must specify --log-directory option when' + ' running backup with --log-level-file option enabled') diff --git a/s3/tests/custom_test.py b/s3/tests/custom_test.py new file mode 100644 index 000000000..a889cce77 --- /dev/null +++ b/s3/tests/custom_test.py @@ -0,0 +1,56 @@ +import os +import sys +import unittest +import subprocess + +root = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..')) +if root not in sys.path: + sys.path.append(root) + +from tests.helpers.ptrack_helpers import ProbackupTest, fs_backup_class + +MULTIHOST_CONF_FILE = 's3/tests/multihost.conf' + +class CustomTest(ProbackupTest): + """ + Class for custom tests for checking some S3 features. + """ + + @unittest.skip("This test is meant for manual use only. 
Comment this line for testing") + @unittest.skipIf(fs_backup_class.is_file_based, "This test can only be launched under S3") + def test_s3_multihost_pbckp_825(self): + """ + Test for checking multihost case. + !!! WARNING !!! For manual testing only. + For checking multihost working you should comment first 'unittest.skip' + and fill proper IP addresses in file multihost.conf. + Also, it is recommended to set options in enviroment variables -- just in case. + """ + node = self.pg_node.make_simple('node', set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.pgbench_init(scale=2) + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + before = node.table_checksum("pgbench_branches") + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--s3-config-file", MULTIHOST_CONF_FILE]) + before_pgdata = self.pgdata_content(node.data_dir) + + node.stop() + node.cleanup() + + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + after_pgdata = self.pgdata_content(node.data_dir) + + node.slow_start() + + after = node.table_checksum("pgbench_branches") + self.assertEqual(before, after) + self.compare_pgdata(before_pgdata, after_pgdata) diff --git a/s3/tests/multihost.conf b/s3/tests/multihost.conf new file mode 100644 index 000000000..20fadf4be --- /dev/null +++ b/s3/tests/multihost.conf @@ -0,0 +1,7 @@ +access-key=minioadmin +secret-key=minioadmin +s3-host=127.0.0.1:9000;192.168.1.38 +s3-port=9000 +s3-bucket=s3test +s3-region=us-east-1 +s3-secure=OFF diff --git a/s3/tests/param_test.py b/s3/tests/param_test.py new file mode 100644 index 000000000..825278083 --- /dev/null +++ b/s3/tests/param_test.py @@ -0,0 +1,93 @@ +import os +import sys + +root = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..')) +if root not in sys.path: + sys.path.append(root) + +from tests.helpers.ptrack_helpers import ProbackupTest + +base_path = os.path.dirname(__file__) + + +# PG_PROBACKUP_S3_IGNORE_CERT_VER Don't verify the certificate host and peer +# PG_PROBACKUP_S3_CA_CERTIFICATE Trust to the path to Certificate Authority (CA) bundle +# PG_PROBACKUP_S3_CA_PATH Trust to the directory holding CA certificates +# PG_PROBACKUP_S3_CLIENT_CERT Setup SSL client certificate +# PG_PROBACKUP_S3_CLIENT_KEY Setup private key file for TLS and SSL client certificate +class ParametersTest(ProbackupTest): + + def test_s3_config_file(self): + node = self.pg_node.make_simple('node') + node.slow_start() + config_file = os.path.join(base_path, 'test_data/configs/s3_config.conf') + + self.pb.init(options=[f"--s3-config-file={config_file}"]) + + self.pb.add_instance('node', node) + self.backup_and_validate(node) + + def test_s3_ignore_cert_verification(self): + node = self.pg_node.make_simple('node') + node.slow_start() + config_file = os.path.join(base_path, 'test_data/configs/ignore_cert_verification.conf') + + init_out = self.pb.init(options=["--log-level-console=VERBOSE", + f"--s3-config-file={config_file}"]) + self.assertMessage(init_out, contains="Turn off SSL verification") + + self.pb.add_instance('node', node) + self.backup_and_validate(node) + + def test_s3_ca_certificate(self): + node = self.pg_node.make_simple('node') + node.slow_start() + config_file = os.path.join(base_path, 'test_data/configs/ca_certificate.conf') + + 
init_out = self.pb.init(options=["--log-level-console=VERBOSE", + f"--s3-config-file={config_file}"]) + self.assertMessage(init_out, contains=f"Trust to the path to CA bundle: ../certificate.pem") + self.pb.add_instance('node', node) + self.backup_and_validate(node) + + def test_s3_ca_path(self): + node = self.pg_node.make_simple('node') + node.slow_start() + config_file = os.path.join(base_path, 'test_data/configs/ca_path.conf') + + init_out = self.pb.init(options=["--log-level-console=VERBOSE", + f"--s3-config-file={config_file}"]) + self.assertMessage(init_out, contains=f"Trust to the dir with CA certificates: ../") + + self.pb.add_instance('node', node) + self.backup_and_validate(node) + + def test_s3_ssl_client_cert(self): + node = self.pg_node.make_simple('node') + node.slow_start() + config_file = os.path.join(base_path, 'test_data/configs/ssl_client_cert.conf') + + init_out = self.pb.init(options=["--log-level-console=VERBOSE", + f"--s3-config-file={config_file}"]) + self.assertMessage(init_out, contains=f"Setup SSL client certificate: ../certificate_request.csr") + + self.pb.add_instance('node', node) + self.backup_and_validate(node) + + def test_s3_ssl_client_key(self): + node = self.pg_node.make_simple('node') + node.slow_start() + config_file = os.path.join(base_path, 'test_data/configs/ssl_client_key.conf') + + init_out = self.pb.init(options=["--log-level-console=VERBOSE", + f"--s3-config-file={config_file}"]) + self.assertMessage(init_out, contains=f"Setup private key file for TLS and SSL client certificate: " + f"../private_key.pem") + + self.pb.add_instance('node', node) + self.backup_and_validate(node) + + def backup_and_validate(self, node): + backup_id = self.pb.backup_node('node', node, options=['--stream']) + out = self.pb.validate('node', backup_id) + self.assertIn("INFO: Backup {0} is valid".format(backup_id), out) diff --git a/s3/tests/pytest.ini b/s3/tests/pytest.ini new file mode 100644 index 000000000..aeebbd1be --- /dev/null +++ b/s3/tests/pytest.ini @@ -0,0 +1,6 @@ +[pytest] +log_cli = true +log_cli_level = INFO +log_format = %(asctime)s %(levelname)s %(message)s +log_date_format = %Y-%m-%d %H:%M:%S +testpaths = tests diff --git a/s3/tests/s3.conf b/s3/tests/s3.conf new file mode 100644 index 000000000..8e3a53586 --- /dev/null +++ b/s3/tests/s3.conf @@ -0,0 +1,7 @@ +access-key=minioadmin +secret-key=minioadmin +s3-host=10.5.52.86 +s3-port=9000 +s3-bucket=test2 +s3-region=us-west-2 +s3-secure=OFF diff --git a/s3/tests/test_data/certificate.pem b/s3/tests/test_data/certificate.pem new file mode 100644 index 000000000..49e87c737 --- /dev/null +++ b/s3/tests/test_data/certificate.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDKzCCAhMCFDBMfPPaGS25WPTYiKGqOfrJFs97MA0GCSqGSIb3DQEBCwUAMFIx +CzAJBgNVBAYTAlJVMQ8wDQYDVQQIDAZNb3Njb3cxDzANBgNVBAcMBk1vc2NvdzEN +MAsGA1UECgwEd2Q0MDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI0MDIxOTE1MjM1 +MVoXDTMyMDcxMTE1MjM1MVowUjELMAkGA1UEBhMCUlUxDzANBgNVBAgMBk1vc2Nv +dzEPMA0GA1UEBwwGTW9zY293MQ0wCwYDVQQKDAR3ZDQwMRIwEAYDVQQDDAlsb2Nh +bGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAe8TovfAbX3Cf +jZozB7sct0hbbEmTL/qR5TGxBzLs4wZ2GmtuV8y6j8VotrVfeev9StqHL5wtVXO+ +aJPjRcfNcSF8PxVeBXVFz9Bnzi3PMNZHp/jlkTTgQ9O7l6pBIyksa+aOFmrypGMn +yC7Fcx7NORcia4Nz8cHjzAzyxPXnQxI7Y7DnJpwAgNc1gjWfAPWl97We2daXGBKW +PMinRs5qxc5fKgFRpKSjgU2I6cy5dxZtXovHXGJt6RoeKMB9U/YvGwsflCO8hvI2 +AXUtlmWhk7n55K2Ac4loPqdsCyNL6yvgRcBvRI7lnzdW4Ix8dhXZdaCI+L/ZTVSj +Obt6S4IlAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAHl3H4yaiQccehquM2lEW1IQ 
+ZoLJ6xApqoT3mVfQA+ohrqxlFOpEtycOk+QaLyXs5pKmA8VA9csReAdJcsNHJ+oV +6YSuCl1mc8odphUHJvMkvWBLYcmmqbj7ZVdXMW3QWH/z71ZOzQNJ4up7KkC10fMm +JrM94iSTIEsje+43p5KlVjTotL3nj/HU2xZVhuCknweed6E/V9uURzC0oLcU0FSU +fSL723A+CqhpYtHxODOZ9bMI6OSYD6RFx3r9E4ZY8se1FpzhaukX2h4stDAyqa70 +vFa1Zv89ZMvmu2htsiiGaMVQZcOaV+FfeEqo/HSdmAo9ZtJuUx/dPL3M5l8FJTs= +-----END CERTIFICATE----- diff --git a/s3/tests/test_data/certificate_request.csr b/s3/tests/test_data/certificate_request.csr new file mode 100644 index 000000000..1a4e840be --- /dev/null +++ b/s3/tests/test_data/certificate_request.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIClzCCAX8CAQAwUjELMAkGA1UEBhMCUlUxDzANBgNVBAgMBk1vc2NvdzEPMA0G +A1UEBwwGTW9zY293MQ0wCwYDVQQKDAR3ZDQwMRIwEAYDVQQDDAlsb2NhbGhvc3Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAe8TovfAbX3CfjZozB7sc +t0hbbEmTL/qR5TGxBzLs4wZ2GmtuV8y6j8VotrVfeev9StqHL5wtVXO+aJPjRcfN +cSF8PxVeBXVFz9Bnzi3PMNZHp/jlkTTgQ9O7l6pBIyksa+aOFmrypGMnyC7Fcx7N +ORcia4Nz8cHjzAzyxPXnQxI7Y7DnJpwAgNc1gjWfAPWl97We2daXGBKWPMinRs5q +xc5fKgFRpKSjgU2I6cy5dxZtXovHXGJt6RoeKMB9U/YvGwsflCO8hvI2AXUtlmWh +k7n55K2Ac4loPqdsCyNL6yvgRcBvRI7lnzdW4Ix8dhXZdaCI+L/ZTVSjObt6S4Il +AgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAnwxD7vfrWZ5Ft+U2aa9DPZYoIezz +r+JafWFDzW9HIkT8GckD6Advhz+6J908GwXAbL31g6Vt07gG7uvznl/++xKWc+Ub +dBsWeSZj5WRLB4DS91y4soI2jp0EXOa8j7m3sGNmALrxNHYqP5KbL52jWG42soPl +jTIUJ3LdvybmLALmI7yPxIbYO5y80nNCy4ovrksW61LKgyEyPMtcLFxYSp5lqIR1 +oTVEJ9UuReXRQWj88U4HIXFdKn0hhHEQBvvfRCHOvr45/7pAmCL2Tf7Gt1ClxcCo +LiO3vUc0pcb8sD4BR/OsSt1D0GQS/w3RO6CT4AzjWVl6rry8zPdKFaF2Cg== +-----END CERTIFICATE REQUEST----- diff --git a/s3/tests/test_data/configs/ca_certificate.conf b/s3/tests/test_data/configs/ca_certificate.conf new file mode 100644 index 000000000..7a2a60873 --- /dev/null +++ b/s3/tests/test_data/configs/ca_certificate.conf @@ -0,0 +1,8 @@ +access-key=admin +secret-key=12345qwe +s3-host=minio +s3-port=9000 +s3-bucket=pg-probackup +s3-region=us-west-2 +s3-secure=OFF +s3-ca-certificate=../certificate.pem \ No newline at end of file diff --git a/s3/tests/test_data/configs/ca_certificate_2.conf b/s3/tests/test_data/configs/ca_certificate_2.conf new file mode 100644 index 000000000..3c642eef7 --- /dev/null +++ b/s3/tests/test_data/configs/ca_certificate_2.conf @@ -0,0 +1,8 @@ +access-key=minioadmin +secret-key=minioadmin +s3-host=10.5.52.86 +s3-port=9000 +s3-bucket=test2 +s3-region=us-west-2 +s3-secure=OFF +s3-ca-certificate=../certificate.pem \ No newline at end of file diff --git a/s3/tests/test_data/configs/ca_path.conf b/s3/tests/test_data/configs/ca_path.conf new file mode 100644 index 000000000..bef73e8b7 --- /dev/null +++ b/s3/tests/test_data/configs/ca_path.conf @@ -0,0 +1,8 @@ +access-key=admin +secret-key=12345qwe +s3-host=minio +s3-port=9000 +s3-bucket=pg-probackup +s3-region=us-west-2 +s3-secure=OFF +s3-ca-path=../ \ No newline at end of file diff --git a/s3/tests/test_data/configs/ignore_cert_verification.conf b/s3/tests/test_data/configs/ignore_cert_verification.conf new file mode 100644 index 000000000..c029610ed --- /dev/null +++ b/s3/tests/test_data/configs/ignore_cert_verification.conf @@ -0,0 +1,8 @@ +access-key=admin +secret-key=12345qwe +s3-host=minio +s3-port=9000 +s3-bucket=pg-probackup +s3-region=us-west-2 +s3-secure=OFF +s3-ignore-cert-ver=ON \ No newline at end of file diff --git a/s3/tests/test_data/configs/s3_config.conf b/s3/tests/test_data/configs/s3_config.conf new file mode 100644 index 000000000..e8530e41f --- /dev/null +++ b/s3/tests/test_data/configs/s3_config.conf @@ -0,0 +1,7 @@ +access-key=admin 
+secret-key=12345qwe +s3-host=minio +s3-port=9000 +s3-bucket=pg-probackup +s3-region=us-west-2 +s3-secure=OFF diff --git a/s3/tests/test_data/configs/ssl_client_cert.conf b/s3/tests/test_data/configs/ssl_client_cert.conf new file mode 100644 index 000000000..ce9643642 --- /dev/null +++ b/s3/tests/test_data/configs/ssl_client_cert.conf @@ -0,0 +1,8 @@ +access-key=admin +secret-key=12345qwe +s3-host=minio +s3-port=9000 +s3-bucket=pg-probackup +s3-region=us-west-2 +s3-secure=OFF +s3-client-cert =../certificate_request.csr \ No newline at end of file diff --git a/s3/tests/test_data/configs/ssl_client_key.conf b/s3/tests/test_data/configs/ssl_client_key.conf new file mode 100644 index 000000000..b960a0230 --- /dev/null +++ b/s3/tests/test_data/configs/ssl_client_key.conf @@ -0,0 +1,8 @@ +access-key=admin +secret-key=12345qwe +s3-host=minio +s3-port=9000 +s3-bucket=pg-probackup +s3-region=us-west-2 +s3-secure=OFF +s3-client-key=../private_key.pem \ No newline at end of file diff --git a/s3/tests/test_data/private_key.pem b/s3/tests/test_data/private_key.pem new file mode 100644 index 000000000..ce5765c7b --- /dev/null +++ b/s3/tests/test_data/private_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDAe8TovfAbX3Cf +jZozB7sct0hbbEmTL/qR5TGxBzLs4wZ2GmtuV8y6j8VotrVfeev9StqHL5wtVXO+ +aJPjRcfNcSF8PxVeBXVFz9Bnzi3PMNZHp/jlkTTgQ9O7l6pBIyksa+aOFmrypGMn +yC7Fcx7NORcia4Nz8cHjzAzyxPXnQxI7Y7DnJpwAgNc1gjWfAPWl97We2daXGBKW +PMinRs5qxc5fKgFRpKSjgU2I6cy5dxZtXovHXGJt6RoeKMB9U/YvGwsflCO8hvI2 +AXUtlmWhk7n55K2Ac4loPqdsCyNL6yvgRcBvRI7lnzdW4Ix8dhXZdaCI+L/ZTVSj +Obt6S4IlAgMBAAECggEAHuK9UetIX9wMok2CdLXE3HK8dY8Gr0t/lXGFa7aQAVrc +ao8KtgX4n+b58jd/GvbhWx9ruU12bf/MNr1pHYrQ6LJCgUFOGGwfyTfzZPS8fQaF +G7JkSdHm/iVkEDHh69nMfYhC0oHeX1jpYDTK7HvwcVW71JYT3cLpLbhMS5g3LQha +pkcDYYpLFxRLjVn8S53Hn/eI7sCAsuXmm7VJ0x1vf2Gaefq5cmR8rrObrQqQAyFk +jaeB3jwX0i0Hr//iSPMHYgJ+6yTlSB4vS1PPZfFTNsv/taNXxIPkl1olMvRBkkB4 +rJDCras2lqzoMZhzNgWrKtCe+MkHk2nwGj79OTNEAQKBgQDw6ujsr+PaCOzgvnrg +yioHaC+KGeBgiTm5dgfce9X4Wf/B8lvYUZsjnuk89701Px9dhlM5nMc9Ge69GQjx +okgY5NgNpqMvHJuwMrqTcRDd3xcZA7baDC4ViCVy9AIZRLbPAnHJuFI1Vng4TnN/ +f/ueiQjxdVN9GgxcjxjHPUBhQQKBgQDMiJ/sax8BbCa9p63NoqQFkaSCOasWWUVu +m0qyg3x1KDbchsZ7QVQ2pJMjWECgGmTHPWotHwtoK3L3VcL0IVHFmkIwQzHMF4JN +LJX7TbDVj0lZJ/X+4xfBslzeEfNPyQd/s6Em9CZXkNHq6aPBjrcBL507EYDVYjkw +2BH4naDD5QKBgA/CXhA1hUMa+FvPOfyAXPV1FD8FaE8ISvVdyi/bDekcAgXu06E3 +3V0cJQeu8PqNsYCNq94F6maNzrch7xrXUb4HIjbqYpxPU8PX3aqNZ9wLntgtDQYA +a/dOm5yEZ6q9evPZphXa7827ZUvRne6GMbByKFqbISX60Towxh8qW47BAoGBAKho +zpVR1qPLM1Lx3+/zvO5A752ycibitoLOJwE5tp6Y0CNgbclLD1Y2yFhP8FZjpRtQ +fD5O04ugSvdQjEeLr9LuoxUuHrCXoth8n7neTH5rjs90Ud7fic4ZWNIZIahONHh3 +pXd3jKKJQe8VFZroMdMuK15ZmSUZzVFkd9enopnpAoGAA2MLJPIKwJ+h/yCpadg0 +SyC4n11t58DEw+cn8ngAG6m7m5x7ZXj/Uj0WPz93/o+oSQjNAjLaIepr0gruvck7 +D+MLCFNtZ8V9SRd76XLoHsT4wpgEhHP91ld31PYtgB87qQkpC16+3xdy04diXlt5 +k7EcC7F1HZUopLkcC239SYc= +-----END PRIVATE KEY----- diff --git a/tests/CVE_2018_1058_test.py b/tests/CVE_2018_1058_test.py index cfd55cc60..3fa28ded1 100644 --- a/tests/CVE_2018_1058_test.py +++ b/tests/CVE_2018_1058_test.py @@ -1,19 +1,16 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest -class CVE_2018_1058(ProbackupTest, unittest.TestCase): +class CVE_2018_1058(ProbackupTest): # @unittest.skip("skip") def test_basic_default_search_path(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True) + node = self.pg_node.make_simple('node', checksum=False, set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -26,19 +23,16 @@ def test_basic_default_search_path(self): "END " "$$ LANGUAGE plpgsql") - self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) + self.pb.backup_node('node', node, backup_type='full', options=['--stream']) # @unittest.skip("skip") def test_basic_backup_modified_search_path(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True) - self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) + node = self.pg_node.make_simple('node', checksum=False, set_replication=True) + node.set_auto_conf(options={'search_path': 'public,pg_catalog'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -62,7 +56,7 @@ def test_basic_backup_modified_search_path(self): "$$ LANGUAGE plpgsql; " "CREATE VIEW public.pg_proc AS SELECT proname FROM public.pg_proc()") - self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) + self.pb.backup_node('node', node, backup_type='full', options=['--stream']) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -73,10 +67,8 @@ def test_basic_backup_modified_search_path(self): # @unittest.skip("skip") def test_basic_checkdb_modified_search_path(self): """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) + node = self.pg_node.make_simple('node') + node.set_auto_conf(options={'search_path': 'public,pg_catalog'}) node.slow_start() node.safe_psql( @@ -110,20 +102,11 @@ def test_basic_checkdb_modified_search_path(self): "CREATE VIEW public.pg_namespace AS SELECT * FROM public.pg_namespace();" ) - try: - self.checkdb_node( + self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', - '-d', 'postgres', '-p', str(node.port)]) - self.assertEqual( - 1, 0, - "Expecting Error because amcheck{,_next} not installed\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Extension 'amcheck' or 'amcheck_next' are not installed in database postgres", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + '-d', 'postgres', '-p', str(node.port)], + expect_error="because amcheck{,_next} not installed") + self.assertMessage(contains= + "WARNING: Extension 'amcheck' or 'amcheck_next' are not installed in database postgres") diff --git a/tests/Readme.md b/tests/Readme.md index 11c5272f9..589c00bad 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -45,12 +45,22 @@ Run long (time consuming) tests: export PG_PROBACKUP_LONG=ON Usage: - sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope + + sudo sysctl kernel.yama.ptrace_scope=0 # only active until the next reboot +or + sudo sed -i 's/ptrace_scope = 1/ptrace_scope = 0/' /etc/sysctl.d/10-ptrace.conf # set permanently, needs a reboot to take effect 
+ # see https://www.kernel.org/doc/Documentation/security/Yama.txt for possible implications of setting this parameter permanently pip install testgres export PG_CONFIG=/path/to/pg_config python -m unittest [-v] tests[.specific_module][.class.test] ``` - +# Environment variables +| Variable | Possible values | Required | Default value | Description | +| - |------------------------------------| - | - |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| KEEP_LOGS | Any | No | Not set | If this variable is set to '1', 'y' or 'Y' then test logs are kept after the successful execution of a test, otherwise test logs are deleted upon the successful execution of a test | +| PG_PROBACKUP_S3_TEST | Not set, minio, VK | No | Not set | If set, specifies the type of the S3 storage for the tests, otherwise signifies to the tests that the storage is not an S3 one | +| PG_PROBACKUP_S3_CONFIG_FILE | Not set, path to config file, True | No | Not set | Specifies the path to an S3 configuration file. If set, all commands will include --s3-config-file='path'. If 'True', the default configuration file at ./s3/tests/s3.conf will be used | +| PGPROBACKUP_TMP_DIR | File path | No | tmp_dirs | The root of the temporary directory hierarchy where tests store data and logs. Relative paths start from the `tests` directory. | # Troubleshooting FAQ ## Python tests failure diff --git a/tests/__init__.py b/tests/__init__.py index c8d2c70c3..89f0c47a7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,9 +3,9 @@ from . import init_test, merge_test, option_test, show_test, compatibility_test, \ backup_test, delete_test, delta_test, restore_test, validate_test, \ - retention_test, pgpro560_test, pgpro589_test, pgpro2068_test, false_positive_test, replica_test, \ - compression_test, page_test, ptrack_test, archive_test, exclude_test, cfs_backup_test, cfs_restore_test, \ - cfs_validate_backup_test, auth_test, time_stamp_test, logging_test, \ + retention_test, pgpro560_test, pgpro589_test, false_positive_test, replica_test, \ + compression_test, page_test, ptrack_test, archive_test, exclude_test, \ + auth_test, time_stamp_test, logging_test, \ locking_test, remote_test, external_test, config_test, checkdb_test, set_backup_test, incr_restore_test, \ catchup_test, CVE_2018_1058_test, time_consuming_test @@ -35,9 +35,6 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(compatibility_test)) suite.addTests(loader.loadTestsFromModule(checkdb_test)) suite.addTests(loader.loadTestsFromModule(config_test)) - suite.addTests(loader.loadTestsFromModule(cfs_backup_test)) - suite.addTests(loader.loadTestsFromModule(cfs_restore_test)) - suite.addTests(loader.loadTestsFromModule(cfs_validate_backup_test)) suite.addTests(loader.loadTestsFromModule(compression_test)) suite.addTests(loader.loadTestsFromModule(delete_test)) suite.addTests(loader.loadTestsFromModule(delta_test)) @@ -53,7 +50,6 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(page_test)) suite.addTests(loader.loadTestsFromModule(pgpro560_test)) suite.addTests(loader.loadTestsFromModule(pgpro589_test)) - suite.addTests(loader.loadTestsFromModule(pgpro2068_test)) suite.addTests(loader.loadTestsFromModule(remote_test)) suite.addTests(loader.loadTestsFromModule(replica_test)) suite.addTests(loader.loadTestsFromModule(restore_test)) diff --git a/tests/archive_test.py 
b/tests/archive_test.py index 00fd1f592..5cc744ea1 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -1,33 +1,31 @@ import os import shutil -import gzip import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class, get_relative_path +from pg_probackup2.gdb import needs_gdb from .helpers.data_helpers import tail_file -from datetime import datetime, timedelta import subprocess from sys import exit from time import sleep -from distutils.dir_util import copy_tree +from pathlib import PurePath +from testgres import ProcessType -class ArchiveTest(ProbackupTest, unittest.TestCase): +class ArchiveTest(ProbackupTest): # @unittest.expectedFailure # @unittest.skip("skip") def test_pgpro434_1(self): """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -37,25 +35,23 @@ def test_pgpro434_1(self): "generate_series(0,100) i") result = node.table_checksum("t_heap") - self.backup_node( - backup_dir, 'node', node) + self.pb.backup_node('node', node) node.cleanup() - self.restore_node( - backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() # Recreate backup catalog - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir.cleanup() + self.pb.init() + self.pb.add_instance('node', node) # Make backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.cleanup() # Restore Database - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() self.assertEqual( @@ -69,22 +65,15 @@ def test_pgpro434_2(self): Check that timelines are correct. 
WAITING PGPRO-1053 for --immediate """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'} ) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FIRST TIMELINE @@ -93,7 +82,7 @@ def test_pgpro434_2(self): "create table t_heap as select 1 as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,100) i") - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node.safe_psql( "postgres", "insert into t_heap select 100501 as id, md5(i::text) as text, " @@ -102,8 +91,7 @@ def test_pgpro434_2(self): # SECOND TIMELIN node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--immediate', '--recovery-target-action=promote']) node.slow_start() @@ -124,7 +112,7 @@ def test_pgpro434_2(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(100,200) i") - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -134,8 +122,7 @@ def test_pgpro434_2(self): # THIRD TIMELINE node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--immediate', '--recovery-target-action=promote']) node.slow_start() @@ -151,7 +138,7 @@ def test_pgpro434_2(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(200,300) i") - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) result = node.table_checksum("t_heap") node.safe_psql( @@ -162,8 +149,7 @@ def test_pgpro434_2(self): # FOURTH TIMELINE node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--immediate', '--recovery-target-action=promote']) node.slow_start() @@ -175,8 +161,7 @@ def test_pgpro434_2(self): # FIFTH TIMELINE node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--immediate', '--recovery-target-action=promote']) node.slow_start() @@ -188,8 +173,7 @@ def test_pgpro434_2(self): # SIXTH TIMELINE node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--immediate', '--recovery-target-action=promote']) node.slow_start() @@ -209,29 +193,25 @@ def test_pgpro434_2(self): 'data after restore not equal to original data') # @unittest.skip("skip") + @needs_gdb def test_pgpro434_3(self): """ Check pg_stop_backup_timeout, needed backup_timeout Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) 
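+ # self.pb wraps pg_probackup commands against self.backup_dir supplied by the
+ # ProbackupTest base class, so the catalog path is no longer passed explicitly
+ # to init/add_instance/set_archiving.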
- self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - gdb = self.backup_node( - backup_dir, 'node', node, + gdb = self.pb.backup_node('node', node, options=[ - "--archive-timeout=60", + "--archive-timeout=10", "--log-level-file=LOG"], gdb=True) @@ -239,26 +219,20 @@ def test_pgpro434_3(self): gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - self.set_auto_conf(node, {'archive_command': 'exit 1'}) + node.set_auto_conf({'archive_command': 'exit 1'}) node.reload() + sleep(1) + gdb.continue_execution_until_exit() sleep(1) - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() + log_content = self.read_pb_log() - # in PG =< 9.6 pg_stop_backup always wait - if self.get_version(node) < 100000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", - log_content) + self.assertIn( + "ERROR: WAL segment 000000010000000000000003 could not be archived in 10 seconds", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -270,29 +244,25 @@ def test_pgpro434_3(self): 'PostgreSQL crashed because of a failed assert') # @unittest.skip("skip") + @needs_gdb def test_pgpro434_4(self): """ Check pg_stop_backup_timeout, libpq-timeout requested. Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - gdb = self.backup_node( - backup_dir, 'node', node, + gdb = self.pb.backup_node('node', node, options=[ - "--archive-timeout=60", + "--archive-timeout=10", "--log-level-file=info"], gdb=True) @@ -300,7 +270,7 @@ def test_pgpro434_4(self): gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - self.set_auto_conf(node, {'archive_command': 'exit 1'}) + node.set_auto_conf({'archive_command': 'exit 1'}) node.reload() os.environ["PGAPPNAME"] = "foo" @@ -314,26 +284,23 @@ def test_pgpro434_4(self): os.environ["PGAPPNAME"] = "pg_probackup" postgres_gdb = self.gdb_attach(pid) - if self.get_version(node) < 150000: + if self.pg_config_version < 150000: postgres_gdb.set_breakpoint('do_pg_stop_backup') else: postgres_gdb.set_breakpoint('do_pg_backup_stop') postgres_gdb.continue_execution_until_running() gdb.continue_execution_until_exit() - # gdb._execute('detach') - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() + log_content = self.read_pb_log() - if self.get_version(node) < 150000: + if self.pg_config_version < 150000: self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + "ERROR: pg_stop_backup doesn't answer in 10 seconds, cancel it", log_content) else: self.assertIn( - "ERROR: 
pg_backup_stop doesn't answer in 60 seconds, cancel it", + "ERROR: pg_backup_stop doesn't answer in 10 seconds, cancel it", log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') @@ -348,30 +315,20 @@ def test_pgpro434_4(self): # @unittest.skip("skip") def test_archive_push_file_exists(self): """Archive-push if file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = '000000010000000000000001.gz' - file = os.path.join(wals_dir, filename) - else: - filename = '000000010000000000000001' - file = os.path.join(wals_dir, filename) - - with open(file, 'a+b') as f: - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close() + suffix = self.compress_suffix + walfile = '000000010000000000000001'+suffix + self.write_instance_wal(backup_dir, 'node', walfile, + b"blablablaadssaaaaaaaaaaaaaaa") node.slow_start() node.safe_psql( @@ -382,7 +339,6 @@ def test_archive_push_file_exists(self): log_file = os.path.join(node.logs_dir, 'postgresql.log') self.switch_wal_segment(node) - sleep(1) log = tail_file(log_file, linetimeout=30, totaltimeout=120, collect=True) @@ -400,61 +356,50 @@ def test_archive_push_file_exists(self): 'pg_probackup archive-push WAL file', log.content) - self.assertIn( - 'WAL file already exists in archive with different checksum', - log.content) + if self.archive_compress: + self.assertIn( + 'WAL file already exists and looks like it is damaged', + log.content) + else: + self.assertIn( + 'WAL file already exists in archive with different checksum', + log.content) self.assertNotIn( 'pg_probackup archive-push completed successfully', log.content) # btw check that console coloring codes are not slipped into log file self.assertNotIn('[0m', log.content) + log.stop_collect() - if self.get_version(node) < 100000: - wal_src = os.path.join( - node.data_dir, 'pg_xlog', '000000010000000000000001') - else: - wal_src = os.path.join( - node.data_dir, 'pg_wal', '000000010000000000000001') + wal_src = os.path.join( + node.data_dir, 'pg_wal', '000000010000000000000001') + with open(wal_src, 'rb') as f_in: + file_content = f_in.read() - if self.archive_compress: - with open(wal_src, 'rb') as f_in, gzip.open( - file, 'wb', compresslevel=1) as f_out: - shutil.copyfileobj(f_in, f_out) - else: - shutil.copyfile(wal_src, file) + self.write_instance_wal(backup_dir, 'node', walfile, file_content, + compress = self.archive_compress) self.switch_wal_segment(node) - log.stop_collect() log.wait(contains = 'pg_probackup archive-push completed successfully') # @unittest.skip("skip") def test_archive_push_file_exists_overwrite(self): """Archive-push if file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], 
pg_options={'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = '000000010000000000000001.gz' - file = os.path.join(wals_dir, filename) - else: - filename = '000000010000000000000001' - file = os.path.join(wals_dir, filename) - - with open(file, 'a+b') as f: - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close() + suffix = self.compress_suffix + walfile = '000000010000000000000001'+suffix + self.write_instance_wal(backup_dir, 'node', walfile, + b"blablablaadssaaaaaaaaaaaaaaa") node.slow_start() node.safe_psql( @@ -465,7 +410,6 @@ def test_archive_push_file_exists_overwrite(self): log_file = os.path.join(node.logs_dir, 'postgresql.log') self.switch_wal_segment(node) - sleep(1) log = tail_file(log_file, linetimeout=30, collect=True) log.wait(contains = 'The failed archive command was') @@ -476,118 +420,47 @@ def test_archive_push_file_exists_overwrite(self): 'DETAIL: The failed archive command was:', log.content) self.assertIn( 'pg_probackup archive-push WAL file', log.content) - self.assertNotIn( - 'WAL file already exists in archive with ' - 'different checksum, overwriting', log.content) - self.assertIn( - 'WAL file already exists in archive with ' - 'different checksum', log.content) + self.assertNotIn('overwriting', log.content) + if self.archive_compress: + self.assertIn( + 'WAL file already exists and looks like ' + 'it is damaged', log.content) + else: + self.assertIn( + 'WAL file already exists in archive with ' + 'different checksum', log.content) self.assertNotIn( 'pg_probackup archive-push completed successfully', log.content) - self.set_archiving(backup_dir, 'node', node, overwrite=True) + self.pb.set_archiving('node', node, overwrite=True) node.reload() self.switch_wal_segment(node) log.drop_content() log.wait(contains = 'pg_probackup archive-push completed successfully') - self.assertIn( - 'WAL file already exists in archive with ' - 'different checksum, overwriting', log.content) - - # @unittest.skip("skip") - def test_archive_push_partial_file_exists(self): - """Archive-push if stale '.part' file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving( - backup_dir, 'node', node, - log_level='verbose', archive_timeout=60) - - node.slow_start() - - # this backup is needed only for validation to xid - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t1(a int)") - - xid = node.safe_psql( - "postgres", - "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() - - filename_orig = filename_orig.decode('utf-8') - - # form up path to next .part WAL segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') if 
self.archive_compress: - filename = filename_orig + '.gz' + '.part' - file = os.path.join(wals_dir, filename) + self.assertIn( + 'WAL file already exists and looks like ' + 'it is damaged, overwriting', log.content) else: - filename = filename_orig + '.part' - file = os.path.join(wals_dir, filename) - - # emulate stale .part file - with open(file, 'a+b') as f: - f.write(b"blahblah") - f.flush() - f.close() - - self.switch_wal_segment(node) - sleep(70) - - # check that segment is archived - if self.archive_compress: - filename_orig = filename_orig + '.gz' - - file = os.path.join(wals_dir, filename_orig) - self.assertTrue(os.path.isfile(file)) - - # successful validate means that archive-push reused stale wal segment - self.validate_pb( - backup_dir, 'node', - options=['--recovery-target-xid={0}'.format(xid)]) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - self.assertIn( - 'Reusing stale temp WAL file', - log_content) + 'WAL file already exists in archive with ' + 'different checksum, overwriting', log.content) - # @unittest.skip("skip") + @unittest.skip("should be redone with file locking") def test_archive_push_part_file_exists_not_stale(self): """Archive-push if .part file exists and it is not stale""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + # TODO: this test is not completely obsolete, but should be rewritten + # with use of file locking when push_file_internal will use it. + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, archive_timeout=60) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, archive_timeout=60) node.slow_start() @@ -600,27 +473,17 @@ def test_archive_push_part_file_exists_not_stale(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') # form up path to next .part WAL segment wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = filename_orig + '.gz' + '.part' - file = os.path.join(wals_dir, filename) - else: - filename = filename_orig + '.part' - file = os.path.join(wals_dir, filename) + filename = filename_orig + self.compress_suffix + '.part' + file = os.path.join(wals_dir, filename) with open(file, 'a+b') as f: f.write(b"blahblah") @@ -638,8 +501,7 @@ def test_archive_push_part_file_exists_not_stale(self): sleep(40) # check that segment is NOT archived - if self.archive_compress: - filename_orig = filename_orig + '.gz' + filename_orig += self.compress_suffix file = os.path.join(wals_dir, filename_orig) @@ -654,33 +516,27 @@ def test_archive_push_part_file_exists_not_stale(self): # @unittest.expectedFailure # @unittest.skip("skip") + @needs_gdb def 
test_replica_archive(self): """ make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s', 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) + self.pb.init() # ADD INSTANCE 'MASTER' - self.add_instance(backup_dir, 'master', master) + self.pb.add_instance('master', master) master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() master.psql( @@ -689,15 +545,15 @@ def test_replica_archive(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,2560) i") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) before = master.table_checksum("t_heap") # Settings for Replica - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) self.set_replica(master, replica, synchronous=True) - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.pb.add_instance('replica', replica) + self.pb.set_archiving('replica', replica, replica=True) replica.slow_start(replica=True) # Check data correctness on replica @@ -714,26 +570,21 @@ def test_replica_archive(self): "from generate_series(256,512) i") before = master.table_checksum("t_heap") - backup_id = self.backup_node( - backup_dir, 'replica', replica, + backup_id = self.pb.backup_node('replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) - self.validate_pb(backup_dir, 'replica') + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', backup_id)['status']) # RESTORE FULL BACKUP TAKEN FROM replica - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) + self.pb.restore_node('replica', node=node) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() # CHECK DATA CORRECTNESS after = node.table_checksum("t_heap") @@ -752,26 +603,22 @@ def test_replica_archive(self): self.wait_until_replica_catch_with_master(master, replica) - backup_id = self.backup_node( - backup_dir, 'replica', + backup_id, _ = self.pb.backup_replica_node('replica', replica, backup_type='page', + master=master, options=[ '--archive-timeout=60', - '--master-db=postgres', - '--master-host=localhost', - '--master-port={0}'.format(master.port), '--stream']) - self.validate_pb(backup_dir, 'replica') + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', 
backup_id)['status']) # RESTORE PAGE BACKUP TAKEN FROM replica node.cleanup() - self.restore_node( - backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id) + self.pb.restore_node('replica', node, backup_id=backup_id) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() # CHECK DATA CORRECTNESS @@ -787,27 +634,19 @@ def test_master_and_replica_parallel_archiving(self): set replica with archiving, make archive backup from replica, make archive backup from master """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s'} ) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.init_pb(backup_dir) + self.pb.init() # ADD INSTANCE 'MASTER' - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() master.psql( @@ -817,23 +656,23 @@ def test_master_and_replica_parallel_archiving(self): "from generate_series(0,10000) i") # TAKE FULL ARCHIVE BACKUP FROM MASTER - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) # GET LOGICAL CONTENT FROM MASTER before = master.table_checksum("t_heap") # GET PHYSICAL CONTENT FROM MASTER pgdata_master = self.pgdata_content(master.data_dir) # Settings for Replica - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # CHECK PHYSICAL CORRECTNESS on REPLICA pgdata_replica = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata_master, pgdata_replica) self.set_replica(master, replica) # ADD INSTANCE REPLICA - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) # SET ARCHIVING FOR REPLICA - self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.pb.set_archiving('replica', replica, replica=True) replica.slow_start(replica=True) # CHECK LOGICAL CORRECTNESS on REPLICA @@ -846,27 +685,24 @@ def test_master_and_replica_parallel_archiving(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0, 60000) i") - backup_id = self.backup_node( - backup_dir, 'replica', replica, + backup_id = self.pb.backup_node('replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) - self.validate_pb(backup_dir, 'replica') + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', backup_id)['status']) # TAKE FULL ARCHIVE BACKUP FROM MASTER - backup_id = self.backup_node(backup_dir, 'master', master) - self.validate_pb(backup_dir, 'master') + backup_id = self.pb.backup_node('master', master) + self.pb.validate('master') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) + 'OK', self.pb.show('master', backup_id)['status']) # @unittest.expectedFailure # 
@unittest.skip("skip") + @needs_gdb def test_basic_master_and_replica_concurrent_archiving(self): """ make node 'master 'with archiving, @@ -874,30 +710,19 @@ def test_basic_master_and_replica_concurrent_archiving(self): set replica with archiving, make sure that archiving on both node is working. """ - if self.pg_config_version < self.version_to_num('9.6.0'): - self.skipTest('You need PostgreSQL >= 9.6 for this test') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.init_pb(backup_dir) + self.pb.init() # ADD INSTANCE 'MASTER' - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() master.psql( @@ -909,29 +734,33 @@ def test_basic_master_and_replica_concurrent_archiving(self): master.pgbench_init(scale=5) # TAKE FULL ARCHIVE BACKUP FROM MASTER - self.backup_node(backup_dir, 'master', master) - # GET LOGICAL CONTENT FROM MASTER - before = master.table_checksum("t_heap") + self.pb.backup_node('master', master) # GET PHYSICAL CONTENT FROM MASTER - pgdata_master = self.pgdata_content(master.data_dir) + master.stop() + pgdata_master = self.pgdata_content(master.data_dir, exclude_dirs = ['pg_stat']) + master.start() # Settings for Replica - self.restore_node( - backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # CHECK PHYSICAL CORRECTNESS on REPLICA - pgdata_replica = self.pgdata_content(replica.data_dir) - self.compare_pgdata(pgdata_master, pgdata_replica) + pgdata_replica = self.pgdata_content(replica.data_dir, exclude_dirs = ['pg_stat']) self.set_replica(master, replica, synchronous=False) # ADD INSTANCE REPLICA - # self.add_instance(backup_dir, 'replica', replica) + # self.pb.add_instance('replica', replica) # SET ARCHIVING FOR REPLICA - self.set_archiving(backup_dir, 'master', replica, replica=True) + self.pb.set_archiving('master', replica, replica=True) replica.slow_start(replica=True) + + # GET LOGICAL CONTENT FROM MASTER + before = master.table_checksum("t_heap") # CHECK LOGICAL CORRECTNESS on REPLICA after = replica.table_checksum("t_heap") - self.assertEqual(before, after) + + # self.assertEqual(before, after) + if before != after: + self.compare_pgdata(pgdata_master, pgdata_replica) master.psql( "postgres", @@ -939,18 +768,22 @@ def test_basic_master_and_replica_concurrent_archiving(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") + # freeze bgwriter to get rid of RUNNING XACTS records + bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + gdb_bgwriter = self.gdb_attach(bgwriter_pid) + # TAKE FULL ARCHIVE BACKUP FROM REPLICA - backup_id = self.backup_node(backup_dir, 'master', replica) + backup_id = self.pb.backup_node('master', replica) - self.validate_pb(backup_dir, 'master') + self.pb.validate('master') self.assertEqual( - 
'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) + 'OK', self.pb.show('master', backup_id)['status']) # TAKE FULL ARCHIVE BACKUP FROM MASTER - backup_id = self.backup_node(backup_dir, 'master', master) - self.validate_pb(backup_dir, 'master') + backup_id = self.pb.backup_node('master', master) + self.pb.validate('master') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) + 'OK', self.pb.show('master', backup_id)['status']) master.pgbench_init(scale=10) @@ -961,8 +794,11 @@ def test_basic_master_and_replica_concurrent_archiving(self): master.pgbench_init(scale=10) replica.pgbench_init(scale=10) - self.backup_node(backup_dir, 'master', master) - self.backup_node(backup_dir, 'master', replica) + self.pb.backup_node('master', master) + self.pb.backup_node('master', replica, data_dir=replica.data_dir) + + # Clean after yourself + gdb_bgwriter.detach() # @unittest.expectedFailure # @unittest.skip("skip") @@ -977,55 +813,50 @@ def test_concurrent_archiving(self): if self.pg_config_version < self.version_to_num('11.0'): self.skipTest('You need PostgreSQL >= 11 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) + master = self.pg_node.make_simple('master', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master, replica=True) + self.pb.init() + self.pb.add_instance('node', master) + self.pb.set_archiving('node', master, replica=True) master.slow_start() master.pgbench_init(scale=10) # TAKE FULL ARCHIVE BACKUP FROM MASTER - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) # Settings for Replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) + self.pb.set_archiving('node', replica, replica=True) + replica.set_auto_conf({'port': replica.port}) replica.slow_start(replica=True) # create cascade replicas - replica1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica1')) + replica1 = self.pg_node.make_simple('replica1') replica1.cleanup() # Settings for casaced replica - self.restore_node(backup_dir, 'node', replica1) + self.pb.restore_node('node', node=replica1) self.set_replica(replica, replica1, synchronous=False) - self.set_auto_conf(replica1, {'port': replica1.port}) + replica1.set_auto_conf({'port': replica1.port}) replica1.slow_start(replica=True) # Take full backup from master - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) pgbench = master.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, options=['-T', '30', '-c', '1']) # Take several incremental backups from master - self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) + self.pb.backup_node('node', master, backup_type='page', options=['--no-validate']) - self.backup_node(backup_dir, 'node', master, backup_type='page', options=['--no-validate']) + self.pb.backup_node('node', 
master, backup_type='page', options=['--no-validate']) pgbench.wait() pgbench.stdout.close() @@ -1046,23 +877,21 @@ def test_concurrent_archiving(self): # @unittest.skip("skip") def test_archive_pg_receivexlog(self): """Test backup with pg_receivexlog wal delivary method""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest('test has no meaning for cloud storage') + + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') - pg_receivexlog = self.run_binary( + pg_receivexlog = self.pb.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', '-D', os.path.join(backup_dir, 'wal', 'node') @@ -1080,7 +909,7 @@ def test_archive_pg_receivexlog(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # PAGE node.safe_psql( @@ -1089,18 +918,17 @@ def test_archive_pg_receivexlog(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(10000,20000) i") - self.backup_node( - backup_dir, + self.pb.backup_node( 'node', node, backup_type='page' ) result = node.table_checksum("t_heap") - self.validate_pb(backup_dir) + self.pb.validate() # Check data correctness node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() self.assertEqual( @@ -1115,23 +943,21 @@ def test_archive_pg_receivexlog(self): # @unittest.skip("skip") def test_archive_pg_receivexlog_compression_pg10(self): """Test backup with pg_receivewal compressed wal delivary method""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest('test has no meaning for cloud storage') + + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'} ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - if self.get_version(node) < self.version_to_num('10.0'): - self.skipTest('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') - pg_receivexlog = self.run_binary( + pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog = self.pb.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', '-Z', '9', '-D', os.path.join(backup_dir, 'wal', 'node') @@ -1149,7 +975,7 @@ def test_archive_pg_receivexlog_compression_pg10(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # PAGE node.safe_psql( @@ -1158,16 +984,15 @@ def 
test_archive_pg_receivexlog_compression_pg10(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(10000,20000) i") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page' ) result = node.table_checksum("t_heap") - self.validate_pb(backup_dir) + self.pb.validate() # Check data correctness node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() self.assertEqual( @@ -1196,22 +1021,16 @@ def test_archive_catalog(self): ARCHIVE master: t1 -Z1--Z2--- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() @@ -1222,7 +1041,7 @@ def test_archive_catalog(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) # PAGE master.safe_psql( @@ -1231,42 +1050,33 @@ def test_archive_catalog(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(10000,20000) i") - self.backup_node( - backup_dir, 'master', master, backup_type='page') + self.pb.backup_node('master', master, backup_type='page') - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) self.set_replica(master, replica) - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) + self.pb.add_instance('replica', replica) + self.pb.set_archiving('replica', replica, replica=True) replica.slow_start(replica=True) # FULL backup replica - Y1 = self.backup_node( - backup_dir, 'replica', replica, + Y1 = self.pb.backup_node('replica', replica, options=['--stream', '--archive-timeout=60s']) master.pgbench_init(scale=5) # PAGE backup replica - Y2 = self.backup_node( - backup_dir, 'replica', replica, + Y2 = self.pb.backup_node('replica', replica, backup_type='page', options=['--stream', '--archive-timeout=60s']) # create timeline t2 replica.promote() # FULL backup replica - A1 = self.backup_node( - backup_dir, 'replica', replica) + A1 = self.pb.backup_node('replica', replica) replica.pgbench_init(scale=5) @@ -1282,13 +1092,11 @@ def test_archive_catalog(self): target_xid = res[0][0] # DELTA backup replica - A2 = self.backup_node( - backup_dir, 'replica', replica, backup_type='delta') + A2 = self.pb.backup_node('replica', replica, backup_type='delta') # create timeline t3 replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, + self.pb.restore_node('replica', replica, options=[ 
'--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=2', @@ -1296,13 +1104,11 @@ def test_archive_catalog(self): replica.slow_start() - B1 = self.backup_node( - backup_dir, 'replica', replica) + B1 = self.pb.backup_node('replica', replica) replica.pgbench_init(scale=2) - B2 = self.backup_node( - backup_dir, 'replica', replica, backup_type='page') + B2 = self.pb.backup_node('replica', replica, backup_type='page') replica.pgbench_init(scale=2) @@ -1313,15 +1119,13 @@ def test_archive_catalog(self): con.commit() target_xid = res[0][0] - B3 = self.backup_node( - backup_dir, 'replica', replica, backup_type='page') + B3 = self.pb.backup_node('replica', replica, backup_type='page') replica.pgbench_init(scale=2) # create timeline t4 replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, + self.pb.restore_node('replica', replica, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=3', @@ -1352,8 +1156,7 @@ def test_archive_catalog(self): # create timeline t5 replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, + self.pb.restore_node('replica', replica, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=4', @@ -1371,8 +1174,7 @@ def test_archive_catalog(self): # create timeline t6 replica.cleanup() - self.restore_node( - backup_dir, 'replica', replica, backup_id=A1, + self.pb.restore_node('replica', replica, backup_id=A1, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) @@ -1382,8 +1184,8 @@ def test_archive_catalog(self): sleep(5) - show = self.show_archive(backup_dir, as_text=True) - show = self.show_archive(backup_dir) + show = self.pb.show_archive(as_text=True) + show = self.pb.show_archive() for instance in show: if instance['instance'] == 'replica': @@ -1400,36 +1202,19 @@ def test_archive_catalog(self): for timeline in master_timelines: self.assertTrue(timeline['status'], 'OK') - # create holes in t3 - wals_dir = os.path.join(backup_dir, 'wal', 'replica') - wals = [ - f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) - and not f.endswith('.backup') and not f.endswith('.history') and f.startswith('00000003') - ] - wals.sort() - # check that t3 is ok - self.show_archive(backup_dir) + self.pb.show_archive() - file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000017') - if self.archive_compress: - file = file + '.gz' - os.remove(file) - - file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000012') - if self.archive_compress: - file = file + '.gz' - os.remove(file) - - file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000013') - if self.archive_compress: - file = file + '.gz' - os.remove(file) + # create holes in t3 + suffix = self.compress_suffix + self.remove_instance_wal(backup_dir, 'replica', '000000030000000000000017' + suffix) + self.remove_instance_wal(backup_dir, 'replica', '000000030000000000000012' + suffix) + self.remove_instance_wal(backup_dir, 'replica', '000000030000000000000013' + suffix) # check that t3 is not OK - show = self.show_archive(backup_dir) + show = self.pb.show_archive() - show = self.show_archive(backup_dir) + show = self.pb.show_archive() for instance in show: if instance['instance'] == 'replica': @@ -1514,37 +1299,33 @@ def test_archive_catalog_1(self): self.skipTest('You need to enable ARCHIVE_COMPRESSION ' 'for this test to run') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = 
self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, compress=True) node.slow_start() # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=2) + tailer = tail_file(os.path.join(node.logs_dir, 'postgresql.log')) + tailer.wait_archive_push_completed() + node.stop() - wals_dir = os.path.join(backup_dir, 'wal', 'node') - original_file = os.path.join(wals_dir, '000000010000000000000001.gz') - tmp_file = os.path.join(wals_dir, '000000010000000000000001') - - with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - os.rename( - os.path.join(wals_dir, '000000010000000000000001'), - os.path.join(wals_dir, '000000010000000000000002')) + file_content = self.read_instance_wal(backup_dir, 'node', + '000000010000000000000001'+self.compress_suffix, + decompress=True) + self.write_instance_wal(backup_dir, 'node', '000000010000000000000002', + file_content) - show = self.show_archive(backup_dir) + show = self.pb.show_archive() for instance in show: timelines = instance['timelines'] @@ -1566,39 +1347,37 @@ def test_archive_catalog_2(self): self.skipTest('You need to enable ARCHIVE_COMPRESSION ' 'for this test to run') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, compress=True) node.slow_start() # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=2) + tailer = tail_file(os.path.join(node.logs_dir, "postgresql.log")) + tailer.wait_archive_push_completed() + node.stop() - wals_dir = os.path.join(backup_dir, 'wal', 'node') - original_file = os.path.join(wals_dir, '000000010000000000000001.gz') - tmp_file = os.path.join(wals_dir, '000000010000000000000001') - - with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - os.rename( - os.path.join(wals_dir, '000000010000000000000001'), - os.path.join(wals_dir, '000000010000000000000002')) + suffix = self.compress_suffix + file_content = self.read_instance_wal(backup_dir, 'node', + '000000010000000000000001'+suffix, + decompress=True) + self.write_instance_wal(backup_dir, 'node', '000000010000000000000002', + file_content) - os.remove(original_file) + self.remove_instance_wal(backup_dir, 'node', + '000000010000000000000001'+suffix) - show = self.show_archive(backup_dir) + show = self.pb.show_archive() for instance in show: timelines = instance['timelines'] @@ -1620,35 +1399,35 @@ def test_archive_options(self): if not self.remote: 
self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest("Test has no meaning for cloud storage") + + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, compress=True) node.slow_start() # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=1) node.cleanup() wal_dir = os.path.join(backup_dir, 'wal', 'node') - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--restore-command="cp {0}/%f %p"'.format(wal_dir), '--archive-host=localhost', '--archive-port=22', - '--archive-user={0}'.format(self.user) + '--archive-user={0}'.format(self.username) ]) - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') else: recovery_conf = os.path.join(node.data_dir, 'recovery.conf') @@ -1662,12 +1441,11 @@ def test_archive_options(self): node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--archive-host=localhost', '--archive-port=22', - '--archive-user={0}'.format(self.user)]) + '--archive-user={0}'.format(self.username)]) with open(recovery_conf, 'r') as f: recovery_content = f.read() @@ -1676,7 +1454,7 @@ def test_archive_options(self): "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost " "--remote-port=22 --remote-user={3}'".format( - self.probackup_path, backup_dir, 'node', self.user), + self.probackup_path, backup_dir, 'node', self.username), recovery_content) node.slow_start() @@ -1692,35 +1470,35 @@ def test_archive_options_1(self): check that '--archive-host', '--archive-user', '--archiver-port' and '--restore-command' are working as expected with set-config """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest("Test has no meaning for cloud storage") + + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=True) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, compress=True) node.slow_start() # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=1) node.cleanup() wal_dir = os.path.join(backup_dir, 'wal', 'node') - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=[ '--restore-command="cp {0}/%f %p"'.format(wal_dir), '--archive-host=localhost', '--archive-port=22', - '--archive-user={0}'.format(self.user)]) - 
self.restore_node(backup_dir, 'node', node) + '--archive-user={0}'.format(self.username)]) + self.pb.restore_node('node', node=node) - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') else: recovery_conf = os.path.join(node.data_dir, 'recovery.conf') @@ -1734,13 +1512,12 @@ def test_archive_options_1(self): node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--restore-command=none', '--archive-host=localhost1', '--archive-port=23', - '--archive-user={0}'.format(self.user) + '--archive-user={0}'.format(self.username) ]) with open(recovery_conf, 'r') as f: @@ -1750,7 +1527,7 @@ def test_archive_options_1(self): "restore_command = '\"{0}\" archive-get -B \"{1}\" --instance \"{2}\" " "--wal-file-path=%p --wal-file-name=%f --remote-host=localhost1 " "--remote-port=23 --remote-user={3}'".format( - self.probackup_path, backup_dir, 'node', self.user), + self.probackup_path, backup_dir, 'node', self.username), recovery_content) # @unittest.skip("skip") @@ -1760,27 +1537,23 @@ def test_undefined_wal_file_path(self): check that archive-push works correct with undefined --wal-file-path """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + archive_command = " ".join([f'"{self.probackup_path}"', 'archive-push', + *self.backup_dir.pb_args, '--instance=node', + '--wal-file-name=%f']) if os.name == 'posix': - archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( - self.probackup_path, backup_dir, 'node') - elif os.name == 'nt': - archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format( - self.probackup_path, backup_dir, 'node').replace("\\","\\\\") - else: - self.assertTrue(False, 'Unexpected os family') + # Dash produces a core dump when it gets a SIGQUIT from its + # child process so replace the shell with pg_probackup + archive_command = 'exec ' + archive_command + elif os.name == "nt": + archive_command = archive_command.replace("\\","\\\\") - self.set_auto_conf( - node, - {'archive_command': archive_command}) + self.pb.set_archiving('node', node, custom_archive_command=archive_command) node.slow_start() node.safe_psql( @@ -1788,9 +1561,15 @@ def test_undefined_wal_file_path(self): "create table t_heap as select i" " as id from generate_series(0, 10) i") self.switch_wal_segment(node) + tailer = tail_file(os.path.join(node.logs_dir, "postgresql.log")) + tailer.wait_archive_push_completed() + node.stop() + + log = tail_file(os.path.join(node.logs_dir, 'postgresql.log'), collect=True) + log.wait(contains='archive-push completed successfully') # check - self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') + self.assertEqual(self.pb.show_archive(instance='node', tli=1)['min-segno'], '000000010000000000000001') # @unittest.skip("skip") # @unittest.expectedFailure @@ -1798,29 +1577,25 @@ def 
test_intermediate_archiving(self): """ check that archive-push works correct with --wal-file-path setting by user """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') node_pg_options = {} if node.major_version >= 13: node_pg_options['wal_keep_size'] = '0MB' else: node_pg_options['wal_keep_segments'] = '0' - self.set_auto_conf(node, node_pg_options) + node.set_auto_conf(node_pg_options) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) - wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'intermediate_dir') + wal_dir = os.path.join(self.test_path, 'intermediate_dir') shutil.rmtree(wal_dir, ignore_errors=True) os.makedirs(wal_dir) if os.name == 'posix': - self.set_archiving(backup_dir, 'node', node, custom_archive_command='cp -v %p {0}/%f'.format(wal_dir)) + self.pb.set_archiving('node', node, custom_archive_command='cp -v %p {0}/%f'.format(wal_dir)) elif os.name == 'nt': - self.set_archiving(backup_dir, 'node', node, custom_archive_command='copy /Y "%p" "{0}\\\\%f"'.format(wal_dir.replace("\\","\\\\"))) + self.pb.set_archiving('node', node, custom_archive_command='copy /Y "%p" "{0}\\\\%f"'.format(wal_dir.replace("\\","\\\\"))) else: self.assertTrue(False, 'Unexpected os family') @@ -1833,11 +1608,10 @@ def test_intermediate_archiving(self): wal_segment = '000000010000000000000001' - self.run_pb(["archive-push", "-B", backup_dir, - "--instance=node", "-D", node.data_dir, - "--wal-file-path", "{0}/{1}".format(wal_dir, wal_segment), "--wal-file-name", wal_segment]) + self.pb.archive_push('node', node, wal_file_path="{0}/{1}".format(wal_dir, wal_segment), + wal_file_name=wal_segment) - self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment) + self.assertEqual(self.pb.show_archive(instance='node', tli=1)['min-segno'], wal_segment) # @unittest.skip("skip") # @unittest.expectedFailure @@ -1845,21 +1619,15 @@ def test_waldir_outside_pgdata_archiving(self): """ check that archive-push works correct with symlinked waldir """ - if self.pg_config_version < self.version_to_num('10.0'): - self.skipTest( - 'Skipped because waldir outside pgdata is supported since PG 10') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - external_wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'ext_wal_dir') + backup_dir = self.backup_dir + external_wal_dir = os.path.join(self.test_path, 'ext_wal_dir') shutil.rmtree(external_wal_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)]) + node = self.pg_node.make_simple('node', initdb_params=['--waldir={0}'.format(external_wal_dir)]) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1868,8 +1636,12 @@ def test_waldir_outside_pgdata_archiving(self): " as id from generate_series(0, 10) i") self.switch_wal_segment(node) + tailer = tail_file(os.path.join(node.logs_dir, 'postgresql.log')) + tailer.wait_archive_push_completed() + node.stop() + # 
check - self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') + self.assertEqual(self.pb.show_archive(instance='node', tli=1)['min-segno'], '000000010000000000000001') # @unittest.skip("skip") # @unittest.expectedFailure @@ -1877,33 +1649,30 @@ def test_hexadecimal_timeline(self): """ Check that timelines are correct. """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, log_level='verbose') + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node.pgbench_init(scale=2) # create timelines for i in range(1, 13): # print(i) node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--recovery-target-timeline={0}'.format(i)]) node.slow_start() node.pgbench_init(scale=2) sleep(5) - show = self.show_archive(backup_dir) + show = self.pb.show_archive() timelines = show[0]['timelines'] @@ -1934,29 +1703,27 @@ def test_archiving_and_slots(self): Check that archiving don`t break slot guarantee. """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest("Test has no meaning for cloud storage") + + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', 'max_wal_size': '64MB'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, log_level='verbose') + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" - self.run_binary( + self.pb.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', '--create-slot', '--slot', 'archive_slot', '--if-not-exists' @@ -1964,7 +1731,7 @@ def test_archiving_and_slots(self): node.pgbench_init(scale=10) - pg_receivexlog = self.run_binary( + pg_receivexlog = self.pb.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', '-D', os.path.join(backup_dir, 'wal', 'node'), @@ -1982,56 +1749,51 @@ def test_archiving_and_slots(self): pg_receivexlog.kill() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node.pgbench_init(scale=20) exit(1) def test_archive_push_sanity(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), 
+ node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_mode': 'on', 'archive_command': 'exit 1'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=50) node.stop() - self.set_archiving(backup_dir, 'node', node) + self.pb.set_archiving('node', node) os.remove(os.path.join(node.logs_dir, 'postgresql.log')) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: postgres_log_content = cleanup_ptrack(f.read()) # print(postgres_log_content) # make sure that .backup file is not compressed - self.assertNotIn('.backup.gz', postgres_log_content) + if self.archive_compress: + self.assertNotIn('.backup'+self.compress_suffix, postgres_log_content) self.assertNotIn('WARNING', postgres_log_content) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node( - backup_dir, 'node', replica, - data_dir=replica.data_dir, options=['-R']) + self.pb.restore_node('node', replica, options=['-R']) - # self.set_archiving(backup_dir, 'replica', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) - self.set_auto_conf(replica, {'archive_mode': 'always'}) - self.set_auto_conf(replica, {'hot_standby': 'on'}) + # self.pb.set_archiving('replica', replica, replica=True) + replica.set_auto_conf({'port': replica.port}) + replica.set_auto_conf({'archive_mode': 'always'}) + replica.set_auto_conf({'hot_standby': 'on'}) replica.slow_start(replica=True) self.wait_until_replica_catch_with_master(node, replica) @@ -2042,24 +1804,25 @@ def test_archive_push_sanity(self): replica.pgbench_init(scale=10) log = tail_file(os.path.join(replica.logs_dir, 'postgresql.log'), - collect=True) + collect=True, linetimeout=30) log.wait(regex=r"pushing file.*history") - log.wait(contains='archive-push completed successfully') + log.wait_archive_push_completed() log.wait(regex=r"pushing file.*partial") - log.wait(contains='archive-push completed successfully') + log.wait_archive_push_completed() - # make sure that .partial file is not compressed - self.assertNotIn('.partial.gz', log.content) - # make sure that .history file is not compressed - self.assertNotIn('.history.gz', log.content) + if self.archive_compress: + # make sure that .partial file is not compressed + self.assertNotIn('.partial'+self.compress_suffix, log.content) + # make sure that .history file is not compressed + self.assertNotIn('.history'+self.compress_suffix, log.content) replica.stop() log.wait_shutdown() self.assertNotIn('WARNING', cleanup_ptrack(log.content)) - output = self.show_archive( - backup_dir, 'node', as_json=False, as_text=True, + output = self.pb.show_archive( + 'node', as_json=False, as_text=True, options=['--log-level-console=INFO']) self.assertNotIn('WARNING', output) @@ -2068,27 +1831,20 @@ def test_archive_push_sanity(self): # @unittest.skip("skip") def test_archive_pg_receivexlog_partial_handling(self): """check that archive-get delivers .partial and .gz.partial files""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + 
backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest("Test has no meaning for cloud storage") - if self.get_version(node) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - if self.get_version(node) < 100000: - app_name = 'pg_receivexlog' - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - app_name = 'pg_receivewal' - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + app_name = 'pg_receivewal' + pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -2099,7 +1855,7 @@ def test_archive_pg_receivexlog_partial_handling(self): env = self.test_env env["PGAPPNAME"] = app_name - pg_receivexlog = self.run_binary(cmdline, asynchronous=True, env=env) + pg_receivexlog = self.pb.run_binary(cmdline, asynchronous=True, env=env) if pg_receivexlog.returncode: self.assertFalse( @@ -2107,12 +1863,12 @@ def test_archive_pg_receivexlog_partial_handling(self): 'Failed to start pg_receivexlog: {0}'.format( pg_receivexlog.communicate()[1])) - self.set_auto_conf(node, {'synchronous_standby_names': app_name}) - self.set_auto_conf(node, {'synchronous_commit': 'on'}) + node.set_auto_conf({'synchronous_standby_names': app_name}) + node.set_auto_conf({'synchronous_commit': 'on'}) node.reload() # FULL - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( "postgres", @@ -2121,8 +1877,7 @@ def test_archive_pg_receivexlog_partial_handling(self): "from generate_series(0,1000000) i") # PAGE - self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--stream']) + self.pb.backup_node('node', node, backup_type='page', options=['--stream']) node.safe_psql( "postgres", @@ -2132,15 +1887,13 @@ def test_archive_pg_receivexlog_partial_handling(self): pg_receivexlog.kill() - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, node_restored.data_dir, + self.pb.restore_node('node', node_restored, options=['--recovery-target=latest', '--recovery-target-action=promote']) - self.set_auto_conf(node_restored, {'port': node_restored.port}) - self.set_auto_conf(node_restored, {'hot_standby': 'off'}) + node_restored.set_auto_conf({'port': node_restored.port}) + node_restored.set_auto_conf({'hot_standby': 'off'}) node_restored.slow_start() @@ -2152,19 +1905,17 @@ def test_archive_pg_receivexlog_partial_handling(self): @unittest.skip("skip") def test_multi_timeline_recovery_prefetching(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) 
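The archive tests above replace fixed sleeps with waits on postgresql.log: tail_file(...) combined with wait(regex=...) and wait_archive_push_completed(). A rough sketch of the underlying idea, assuming a simplified wait_in_log helper rather than the framework's actual tail_file API:

import re
import time


def wait_in_log(path, pattern, timeout=30.0, poll=0.2):
    """Poll a log file until `pattern` appears or `timeout` elapses."""
    regex = re.compile(pattern)
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with open(path, errors='replace') as f:
                if regex.search(f.read()):
                    return
        except FileNotFoundError:
            pass  # the server may not have created the log yet
        time.sleep(poll)
    raise TimeoutError(f'{pattern!r} not found in {path} within {timeout}s')


# e.g. wait_in_log(node.logs_dir + '/postgresql.log',
#                  r'archive-push completed successfully')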
node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=50) @@ -2177,8 +1928,7 @@ def test_multi_timeline_recovery_prefetching(self): node.stop() node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-action=promote']) @@ -2194,8 +1944,7 @@ def test_multi_timeline_recovery_prefetching(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ # '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=2', @@ -2208,8 +1957,7 @@ def test_multi_timeline_recovery_prefetching(self): node.stop() node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ # '--recovery-target-xid=100500', '--recovery-target-timeline=3', @@ -2217,7 +1965,7 @@ def test_multi_timeline_recovery_prefetching(self): '--no-validate']) os.remove(os.path.join(node.logs_dir, 'postgresql.log')) - restore_command = self.get_restore_command(backup_dir, 'node', node) + restore_command = self.get_restore_command(backup_dir, 'node') restore_command += ' -j 2 --batch-size=10 --log-level-console=VERBOSE' if node.major_version >= 12: @@ -2259,50 +2007,42 @@ def test_archive_get_batching_sanity(self): .gz file is corrupted and uncompressed is not, check that both corruption detected and uncompressed file is used. """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=50) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node( - backup_dir, 'node', replica, replica.data_dir) + self.pb.restore_node('node', node=replica) self.set_replica(node, replica, log_shipping=True) if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': 'exit 1'}) + replica.set_auto_conf({'restore_command': 'exit 1'}) else: replica.append_conf('recovery.conf', "restore_command = 'exit 1'") replica.slow_start(replica=True) # at this point replica is consistent - restore_command = self.get_restore_command(backup_dir, 'node', replica) + restore_command = self.get_restore_command(backup_dir, 'node') restore_command += ' -j 2 --batch-size=10' # print(restore_command) if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': restore_command}) + replica.set_auto_conf({'restore_command': restore_command}) else: replica.append_conf( 'recovery.conf', "restore_command = '{0}'".format(restore_command)) @@ -2326,45 +2066,49 @@ def 
test_archive_get_prefetch_corruption(self): Make sure that WAL corruption is detected. And --prefetch-dir is honored. """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + node.set_auto_conf({ + 'wal_compression': 'off', + }) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) - node.pgbench_init(scale=50) + node.pgbench_init(scale=20) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node( - backup_dir, 'node', replica, replica.data_dir) + self.pb.restore_node('node', node=replica) self.set_replica(node, replica, log_shipping=True) if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': 'exit 1'}) + replica.set_auto_conf({'restore_command': 'exit 1'}) else: replica.append_conf('recovery.conf', "restore_command = 'exit 1'") + log = tail_file(os.path.join(node.logs_dir, 'postgresql.log'), + linetimeout=30) + log.wait(regex=r"pushing file.*000000D") + log.wait_archive_push_completed() + replica.slow_start(replica=True) # at this point replica is consistent - restore_command = self.get_restore_command(backup_dir, 'node', replica) + restore_command = self.get_restore_command(backup_dir, 'node') restore_command += ' -j5 --batch-size=10 --log-level-console=VERBOSE' #restore_command += ' --batch-size=2 --log-level-console=VERBOSE' if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': restore_command}) + replica.set_auto_conf({'restore_command': restore_command}) else: replica.append_conf( 'recovery.conf', "restore_command = '{0}'".format(restore_command)) @@ -2387,35 +2131,31 @@ def test_archive_get_prefetch_corruption(self): # generate WAL, copy it into prefetch directory, then corrupt # some segment - node.pgbench_init(scale=20) + node.pgbench_init(scale=5) sleep(20) # now copy WAL files into prefetch directory and corrupt some of them - archive_dir = os.path.join(backup_dir, 'wal', 'node') - files = os.listdir(archive_dir) - files.sort() + files = self.get_instance_wal_list(backup_dir, 'node') + suffix = self.compress_suffix for filename in [files[-4], files[-3], files[-2], files[-1]]: - src_file = os.path.join(archive_dir, filename) + content = self.read_instance_wal(backup_dir, 'node', filename, + decompress=True) if node.major_version >= 10: wal_dir = 'pg_wal' else: wal_dir = 'pg_xlog' - if filename.endswith('.gz'): - dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename[:-3]) - with gzip.open(src_file, 'rb') as f_in, open(dst_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - else: - dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) - shutil.copyfile(src_file, dst_file) - - # print(dst_file) + if suffix and filename.endswith(suffix): + filename = filename[:-len(suffix)] + dst_file = os.path.join(replica.data_dir, wal_dir, 'pbk_prefetch', filename) + with open(dst_file, 'wb') as 
f_out: + f_out.write(content) # corrupt file - if files[-2].endswith('.gz'): - filename = files[-2][:-3] + if suffix and files[-2].endswith(suffix): + filename = files[-2][:-len(suffix)] else: filename = files[-2] @@ -2425,14 +2165,13 @@ def test_archive_get_prefetch_corruption(self): f.seek(8192*2) f.write(b"SURIKEN") f.flush() - f.close # enable restore_command - restore_command = self.get_restore_command(backup_dir, 'node', replica) + restore_command = self.get_restore_command(backup_dir, 'node') restore_command += ' --batch-size=2 --log-level-console=VERBOSE' if node.major_version >= 12: - self.set_auto_conf(replica, {'restore_command': restore_command}) + replica.set_auto_conf({'restore_command': restore_command}) else: replica.append_conf( 'recovery.conf', "restore_command = '{0}'".format(restore_command)) @@ -2442,6 +2181,9 @@ def test_archive_get_prefetch_corruption(self): prefetch_line = 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename) restored_line = 'LOG: restored log file "{0}" from archive'.format(filename) + + self.wait_server_wal_exists(replica.data_dir, wal_dir, filename) + tailer = tail_file(os.path.join(replica.logs_dir, 'postgresql.log')) tailer.wait(contains=prefetch_line) tailer.wait(contains=restored_line) @@ -2452,122 +2194,102 @@ def test_archive_show_partial_files_handling(self): check that files with '.part', '.part.gz', '.partial' and '.partial.gz' siffixes are handled correctly """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node, compress=False) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, compress=False) node.slow_start() - self.backup_node(backup_dir, 'node', node) - - wals_dir = os.path.join(backup_dir, 'wal', 'node') + self.pb.backup_node('node', node) # .part file - node.safe_psql( - "postgres", - "create table t1()") + if backup_dir.is_file_based: + wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.get_version(node) < 100000: - filename = node.safe_psql( + node.safe_psql( "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: + "create table t1()") + filename = node.safe_psql( "postgres", "SELECT file_name " "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() - filename = filename.decode('utf-8') + filename = filename.decode('utf-8') - self.switch_wal_segment(node) - - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.part'.format(filename))) + self.switch_wal_segment(node) - # .gz.part file - node.safe_psql( - "postgres", - "create table t2()") + self.wait_instance_wal_exists(backup_dir, 'node', filename) + os.rename( + os.path.join(wals_dir, filename), + os.path.join(wals_dir, '{0}~tmp123451'.format(filename))) - if self.get_version(node) < 100000: - filename = node.safe_psql( + # .gz.part file + node.safe_psql( "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: + "create table t2()") + filename = node.safe_psql( "postgres", "SELECT file_name " "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() - 
filename = filename.decode('utf-8') + filename = filename.decode('utf-8') - self.switch_wal_segment(node) + self.switch_wal_segment(node) - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.gz.part'.format(filename))) + self.wait_instance_wal_exists(backup_dir, 'node', filename) + os.rename( + os.path.join(wals_dir, filename), + os.path.join(wals_dir, f'{filename}{self.compress_suffix}~tmp234513')) # .partial file node.safe_psql( "postgres", "create table t3()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') self.switch_wal_segment(node) - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.partial'.format(filename))) + self.wait_instance_wal_exists(backup_dir, 'node', filename) + file_content = self.read_instance_wal(backup_dir, 'node', filename) + self.write_instance_wal(backup_dir, 'node', f'{filename}.partial', + file_content) + self.remove_instance_wal(backup_dir, 'node', filename) # .gz.partial file node.safe_psql( "postgres", "create table t4()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') self.switch_wal_segment(node) - os.rename( - os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.gz.partial'.format(filename))) + self.wait_instance_wal_exists(backup_dir, 'node', filename) + file_content = self.read_instance_wal(backup_dir, 'node', filename) + self.write_instance_wal(backup_dir, 'node', f'{filename}{self.compress_suffix}.partial', + file_content) + self.remove_instance_wal(backup_dir, 'node', filename) - self.show_archive(backup_dir, 'node', options=['--log-level-file=VERBOSE']) + self.pb.show_archive('node', options=['--log-level-file=VERBOSE']) - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log'), 'r') as f: - log_content = f.read() + log_content = self.read_pb_log() self.assertNotIn( 'WARNING', @@ -2579,27 +2301,24 @@ def test_archive_empty_history_file(self): """ https://github.com/postgrespro/pg_probackup/issues/326 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=5) # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=5) node.cleanup() - 
self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target=latest', '--recovery-target-action=promote']) @@ -2610,8 +2329,7 @@ def test_archive_empty_history_file(self): node.pgbench_init(scale=5) node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target=latest', '--recovery-target-timeline=2', @@ -2623,8 +2341,7 @@ def test_archive_empty_history_file(self): node.pgbench_init(scale=5) node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target=latest', '--recovery-target-timeline=3', @@ -2636,32 +2353,192 @@ def test_archive_empty_history_file(self): # Truncate history files for tli in range(2, 5): - file = os.path.join( - backup_dir, 'wal', 'node', '0000000{0}.history'.format(tli)) - with open(file, "w+") as f: - f.truncate() + self.write_instance_wal(backup_dir, 'node', f'0000000{tli}.history', + b'') - timelines = self.show_archive(backup_dir, 'node', options=['--log-level-file=INFO']) + timelines = self.pb.show_archive('node', options=['--log-level-file=INFO']) # check that all timelines has zero switchpoint for timeline in timelines: self.assertEqual(timeline['switchpoint'], '0/0') - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() - wal_dir = os.path.join(backup_dir, 'wal', 'node') + log_content = self.read_pb_log() - self.assertIn( - 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000002.history')), - log_content) - self.assertIn( - 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000003.history')), - log_content) - self.assertIn( - 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), - log_content) + self.assertRegex( + log_content, + 'WARNING: History file is corrupted or missing: "[^"]*00000002.history"') + self.assertRegex( + log_content, + 'WARNING: History file is corrupted or missing: "[^"]*00000003.history"') + self.assertRegex( + log_content, + 'WARNING: History file is corrupted or missing: "[^"]*00000004.history"') + + def test_archive_get_relative_path(self): + """ + Take a backup in archive mode, restore it and run the cluster + on it with relative pgdata path, archive-get should be ok with + relative pgdata path as well. + """ + + # initialize basic node + node = self.pg_node.make_simple( + base_dir='node', + pg_options={ + 'archive_timeout': '10s'} + ) + + # initialize the node to restore to + restored = self.pg_node.make_empty(base_dir='restored') + + # initialize pg_probackup setup including archiving + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + + # the job + node.slow_start() + self.pb.backup_node('node', node) + node.stop() + self.pb.restore_node('node', restored) + restored.set_auto_conf({"port": restored.port}) + + run_path = os.getcwd() + relative_pgdata = get_relative_path(run_path, restored.data_dir) + + restored.start(params=["-D", relative_pgdata]) + # cleanup + restored.stop() + + def test_archive_push_alot_of_files(self): + """ + Test archive-push pushes files in-order. 
+ PBCKP-911 + """ + if self.pg_config_version < 130000: + self.skipTest("too costly to test with 16MB wal segment") + + node = self.pg_node.make_simple(base_dir='node', + initdb_params=['--wal-segsize','1'], + pg_options={ + 'archive_mode': 'on', + }) + + self.pb.init() + self.pb.add_instance('node', node) + + pg_wal_dir = os.path.join(node.data_dir, 'pg_wal') + + node.slow_start() + # create many segments + for i in range(30): + node.execute("select pg_logical_emit_message(False, 'z', repeat('0', 1024*1024))") + # EXT4 always stores directory entries in a hash table, so the test could skip + # the following two loops if it runs on EXT4. + # + # But for XFS we have to disturb the file order manually. + # 30-30-30 is empirically obtained: pg_wal/archive_status doesn't overflow + # to a B+Tree yet, but already reuses some of the removed items + for i in range(1,30): + fl = f'{1:08x}{0:08x}{i:08X}' + if os.path.exists(os.path.join(pg_wal_dir, fl)): + os.remove(os.path.join(pg_wal_dir, fl)) + os.remove(os.path.join(pg_wal_dir, f'archive_status/{fl}.ready')) + for i in range(30): + node.execute("select pg_logical_emit_message(False, 'z', repeat('0', 1024*1024))") + + node.stop() + + files = os.listdir(pg_wal_dir) + files.sort() + n = int(len(files)/2) + + self.pb.archive_push("node", node, wal_file_name=files[0], wal_file_path=pg_wal_dir, + options=['--threads', '10', + '--batch-size', str(n), + '--log-level-file', 'VERBOSE']) + + archived = self.get_instance_wal_list(self.backup_dir, 'node') + + self.assertListEqual(files[:n], archived) + +################################################################# +# dry-run +################################################################# + + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_dry_run_archive_push(self): + """ Check the archive-push command with the --dry-run option""" + node = self.pg_node.make_simple('node', + set_replication=True) + self.pb.init() + self.pb.add_instance('node', node) + + node.slow_start() + node.pgbench_init(scale=10) + + walfile = node.safe_psql( + 'postgres', + 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + self.pb.archive_push('node', node=node, wal_file_name=walfile, options=['--dry-run']) + + self.assertTrue(len(self.backup_dir.list_dirs((os.path.join(self.backup_dir, 'wal/node')))) == 0) + # Access check suite for the case when the disk is mounted read-only + if fs_backup_class.is_file_based: #AccessPath check is always true on s3 + dir_path = os.path.join(self.backup_dir, 'wal/node') + dir_mode = os.stat(dir_path).st_mode + os.chmod(dir_path, 0o400) + print(self.backup_dir) + + error_message = self.pb.archive_push('node', node=node, wal_file_name=walfile, options=['--dry-run'], + expect_error="because of changed permissions") + try: + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + + node.stop() + + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_archive_get_dry_run(self): + """ + Check the archive-get command with the --dry-run option + """ + # initialize basic node + node = self.pg_node.make_simple( + base_dir='node', + pg_options={ + 'archive_timeout': '3s'} + ) + + # initialize the node to restore to + restored = self.pg_node.make_empty(base_dir='restored') + + # initialize pg_probackup setup including archiving + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + + # the job + node.slow_start() + node.pgbench_init(scale=10) + + 
self.pb.backup_node('node', node) + self.pb.restore_node('node', restored, options=['--recovery-target=latest']) + restored.set_auto_conf({"port": restored.port}) + + files = self.get_instance_wal_list(self.backup_dir, 'node') + cwd = os.getcwd() + os.chdir(restored.data_dir) + wal_dir = self.pgdata_content(os.path.join(restored.data_dir, 'pg_wal')) + self.pb.archive_get('node', wal_file_name=files[-1], wal_file_path="{0}/{1}".format('pg_wal', files[-1]), + options=['--dry-run', "-D", restored.data_dir]) + restored_wal = self.pgdata_content(os.path.join(restored.data_dir, 'pg_wal')) + self.compare_pgdata(wal_dir, restored_wal) + os.chdir(cwd) + node.stop() def cleanup_ptrack(log_content): # PBCKP-423 - need to clean ptrack warning diff --git a/tests/auth_test.py b/tests/auth_test.py index 32cabc4a1..0a8ee5909 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -8,20 +8,18 @@ import signal import time -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest from testgres import StartNodeException -module_name = 'auth_test' skip_test = False - try: from pexpect import * except ImportError: skip_test = True -class SimpleAuthTest(ProbackupTest, unittest.TestCase): +class SimpleAuthTest(ProbackupTest): # @unittest.skip("skip") def test_backup_via_unprivileged_user(self): @@ -30,16 +28,13 @@ def test_backup_via_unprivileged_user(self): run a backups without EXECUTE rights on certain functions """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True, + ptrack_enable=self.ptrack) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() if self.ptrack: @@ -49,27 +44,18 @@ def test_backup_via_unprivileged_user(self): node.safe_psql("postgres", "CREATE ROLE backup with LOGIN") - try: - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) - self.assertEqual( - 1, 0, - "Expecting Error due to missing grant on EXECUTE.") - except ProbackupException as e: - if self.get_version(node) < 150000: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_start_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_backup_start", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, options=['-U', 'backup'], + expect_error='due to missing grant on EXECUTE') + if self.pg_config_version < 150000: + self.assertMessage(contains= + "ERROR: Query failed: ERROR: permission denied " + "for function pg_start_backup") + else: + self.assertMessage(contains= + "ERROR: Query failed: ERROR: permission denied " + "for function pg_backup_start") - if self.get_version(node) < 150000: + if self.pg_config_version < 150000: node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION" @@ -80,59 +66,36 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_backup_start(text, boolean) TO backup;") - if 
self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup") - else: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") - try: - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) - self.assertEqual( - 1, 0, - "Expecting Error due to missing grant on EXECUTE.") - except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied for function " - "pg_create_restore_point\nquery was: " - "SELECT pg_catalog.pg_create_restore_point($1)", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=['-U', 'backup'], + expect_error='due to missing grant on EXECUTE') + self.assertMessage(contains= + "ERROR: Query failed: ERROR: permission denied for function " + "pg_create_restore_point\nquery was: " + "SELECT pg_catalog.pg_create_restore_point($1)") node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION" " pg_create_restore_point(text) TO backup;") - try: - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) - self.assertEqual( - 1, 0, - "Expecting Error due to missing grant on EXECUTE.") - except ProbackupException as e: - if self.get_version(node) < 150000: - self.assertIn( - "ERROR: Query failed: ERROR: permission denied " - "for function pg_stop_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - "ERROR: Query failed: ERROR: permission denied " - "for function pg_backup_stop", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=['-U', 'backup'], + expect_error='due to missing grant on EXECUTE') + if self.pg_config_version < 150000: + self.assertMessage(contains= + "ERROR: Query failed: ERROR: permission denied " + "for function pg_stop_backup") + else: + self.assertMessage(contains= + "ERROR: Query failed: ERROR: permission denied " + "for function pg_backup_stop") - if self.get_version(node) < self.version_to_num('10.0'): - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - elif self.get_version(node) < self.version_to_num('15.0'): + if self.pg_config_version < self.version_to_num('15.0'): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " @@ -142,13 +105,11 @@ def test_backup_via_unprivileged_user(self): "postgres", "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup;") - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) + self.pb.backup_node('node', node, options=['-U', 'backup']) node.safe_psql("postgres", "CREATE DATABASE test1") - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) + self.pb.backup_node('node', node, options=['-U', 'backup']) node.safe_psql( "test1", "create table t1 as select generate_series(0,100)") @@ -157,68 +118,48 @@ def test_backup_via_unprivileged_user(self): node.slow_start() node.safe_psql( - "postgres", - "ALTER ROLE backup REPLICATION") + "postgres", + "ALTER ROLE backup REPLICATION") # FULL - self.backup_node( - backup_dir, 'node', node, options=['-U', 'backup']) + self.pb.backup_node('node', node, options=['-U', 'backup']) # PTRACK if self.ptrack: - self.backup_node( - backup_dir, 'node', 
node, - backup_type='ptrack', options=['-U', 'backup']) + self.pb.backup_node('node', node, + backup_type='ptrack', options=['-U', 'backup']) -class AuthTest(unittest.TestCase): +class AuthTest(ProbackupTest): pb = None node = None # TODO move to object scope, replace module_name - @classmethod - def setUpClass(cls): - - super(AuthTest, cls).setUpClass() - - cls.pb = ProbackupTest() - cls.backup_dir = os.path.join(cls.pb.tmp_path, module_name, 'backup') + @unittest.skipIf(skip_test, "Module pexpect isn't installed. You need to install it.") + def setUp(self): - cls.node = cls.pb.make_simple_node( - base_dir="{}/node".format(module_name), - set_replication=True, - initdb_params=['--data-checksums', '--auth-host=md5'] - ) + super().setUp() - cls.username = cls.pb.get_username() + self.node = self.pg_node.make_simple("node", + set_replication=True, + initdb_params=['--auth-host=md5'], + pg_options={'archive_timeout': '5s'}, + ) - cls.modify_pg_hba(cls.node) + self.modify_pg_hba(self.node) - cls.pb.init_pb(cls.backup_dir) - cls.pb.add_instance(cls.backup_dir, cls.node.name, cls.node) - cls.pb.set_archiving(cls.backup_dir, cls.node.name, cls.node) + self.pb.init() + self.pb.add_instance(self.node.name, self.node) + self.pb.set_archiving(self.node.name, self.node) try: - cls.node.slow_start() + self.node.slow_start() except StartNodeException: raise unittest.skip("Node hasn't started") - if cls.pb.get_version(cls.node) < 100000: - cls.node.safe_psql( - "postgres", - "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") - elif cls.pb.get_version(cls.node) < 150000: - cls.node.safe_psql( + + version = self.pg_config_version + if version < 150000: + self.node.safe_psql( "postgres", "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " @@ -233,7 +174,7 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") else: - cls.node.safe_psql( + self.node.safe_psql( "postgres", "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " @@ -247,37 +188,54 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") - cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') - - # TODO move to object scope, replace module_name - @classmethod - def tearDownClass(cls): - cls.node.cleanup() - cls.pb.del_test_dir(module_name, '') + if version >= 150000: + home_dir = os.path.join(self.test_path, "home") + os.makedirs(home_dir, exist_ok=True) + self.test_env['HOME'] = home_dir + self.pgpass_file = os.path.join(home_dir, '.pgpass') + self.pgpass_file_lock = None + else: + # before PGv15 only true home dir were 
inspected. + # Since we can't have separate file per test, we have to serialize + # tests. + self.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') + self.pgpass_file_lock = self.pgpass_file + '~probackup_test_lock' + # have to lock pgpass by creating file in exclusive mode + for i in range(120): + try: + open(self.pgpass_file_lock, "x").close() + except FileExistsError: + time.sleep(1) + else: + break + else: + raise TimeoutError("can't create ~/.pgpass~probackup_test_lock for 120 seconds") - @unittest.skipIf(skip_test, "Module pexpect isn't installed. You need to install it.") - def setUp(self): self.pb_cmd = ['backup', - '-B', self.backup_dir, - '--instance', self.node.name, - '-h', '127.0.0.1', - '-p', str(self.node.port), - '-U', 'backup', - '-d', 'postgres', - '-b', 'FULL' - ] + '--instance', self.node.name, + '-h', '127.0.0.1', + '-p', str(self.node.port), + '-U', 'backup', + '-d', 'postgres', + '-b', 'FULL', + '--no-sync' + ] def tearDown(self): - if "PGPASSWORD" in self.pb.test_env.keys(): - del self.pb.test_env["PGPASSWORD"] - - if "PGPASSWORD" in self.pb.test_env.keys(): - del self.pb.test_env["PGPASSFILE"] - - try: - os.remove(self.pgpass_file) - except OSError: - pass + super().tearDown() + if not self.pgpass_file_lock: + return + if hasattr(self, "pgpass_line") and os.path.exists(self.pgpass_file): + with open(self.pgpass_file, 'r') as fl: + lines = fl.readlines() + if self.pgpass_line in lines: + lines.remove(self.pgpass_line) + if len(lines) == 0: + os.remove(self.pgpass_file) + else: + with open(self.pgpass_file, 'w') as fl: + fl.writelines(lines) + os.remove(self.pgpass_file_lock) def test_empty_password(self): """ Test case: PGPB_AUTH03 - zero password length """ @@ -313,13 +271,13 @@ def test_ctrl_c_event(self): def test_pgpassfile_env(self): """ Test case: PGPB_AUTH06 - set environment var PGPASSFILE """ - path = os.path.join(self.pb.tmp_path, module_name, 'pgpass.conf') + path = os.path.join(self.test_path, 'pgpass.conf') line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password']) self.create_pgpass(path, line) - self.pb.test_env["PGPASSFILE"] = path + self.test_env["PGPASSFILE"] = path self.assertEqual( "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"], "ERROR: Full backup status is not valid." ) @@ -329,16 +287,16 @@ def test_pgpass(self): self.create_pgpass(self.pgpass_file, line) self.assertEqual( "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"], "ERROR: Full backup status is not valid." ) def test_pgpassword(self): """ Test case: PGPB_AUTH08 - set environment var PGPASSWORD """ - self.pb.test_env["PGPASSWORD"] = "password" + self.test_env["PGPASSWORD"] = "password" self.assertEqual( "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"], "ERROR: Full backup status is not valid." 
) @@ -346,16 +304,18 @@ def test_pgpassword_and_wrong_pgpass(self): """ Test case: PGPB_AUTH09 - Check priority between PGPASSWORD and .pgpass file""" line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password']) self.create_pgpass(self.pgpass_file, line) - self.pb.test_env["PGPASSWORD"] = "password" + self.test_env["PGPASSWORD"] = "password" self.assertEqual( "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"], "ERROR: Full backup status is not valid." ) def run_pb_with_auth(self, password=None, add_args = [], kill=False): - with spawn(self.pb.probackup_path, self.pb_cmd + add_args, encoding='utf-8', timeout=10) as probackup: - result = probackup.expect(u"Password for user .*:", 5) + cmd = [*self.pb_cmd, *add_args, *self.backup_dir.pb_args] + with spawn(self.probackup_path, cmd, + encoding='utf-8', timeout=60, env=self.test_env) as probackup: + result = probackup.expect(u"Password for user .*:", 10) if kill: probackup.kill(signal.SIGINT) elif result == 0: @@ -366,8 +326,7 @@ def run_pb_with_auth(self, password=None, add_args = [], kill=False): raise ExceptionPexpect("Other pexpect errors.") - @classmethod - def modify_pg_hba(cls, node): + def modify_pg_hba(self, node): """ Description: Add trust authentication for user postgres. Need for add new role and set grant. @@ -378,11 +337,12 @@ def modify_pg_hba(cls, node): with open(hba_conf, 'r+') as fio: data = fio.read() fio.seek(0) - fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (cls.username, data)) + fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (self.username, data)) def create_pgpass(self, path, line): - with open(path, 'w') as passfile: + self.pgpass_line = line+"\n" + with open(path, 'a') as passfile: # host:port:db:username:password - passfile.write(line) + passfile.write(self.pgpass_line) os.chmod(path, 0o600) diff --git a/tests/backup_test.py b/tests/backup_test.py index dc60228b5..2e0695b6c 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -2,29 +2,28 @@ import os import re from time import sleep, time -from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException -import shutil -from distutils.dir_util import copy_tree -from testgres import ProcessType, QueryException +from datetime import datetime + +from pg_probackup2.gdb import needs_gdb + +from .helpers.ptrack_helpers import base36enc, ProbackupTest +from .helpers.ptrack_helpers import fs_backup_class import subprocess -class BackupTest(ProbackupTest, unittest.TestCase): +class BackupTest(ProbackupTest): def test_full_backup(self): """ Just test full backup with at least two segments """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', # we need to write a lot. Lets speedup a bit. 
pg_options={"fsync": "off", "synchronous_commit": "off"}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Fill with data @@ -32,9 +31,9 @@ def test_full_backup(self): node.pgbench_init(scale=100, no_vacuum=True) # FULL - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) - out = self.validate_pb(backup_dir, 'node', backup_id) + out = self.pb.validate('node', backup_id) self.assertIn( "INFO: Backup {0} is valid".format(backup_id), out) @@ -43,15 +42,12 @@ def test_full_backup_stream(self): """ Just test full backup with at least two segments in stream mode """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', # we need to write a lot. Lets speedup a bit. pg_options={"fsync": "off", "synchronous_commit": "off"}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # Fill with data @@ -59,10 +55,10 @@ def test_full_backup_stream(self): node.pgbench_init(scale=100, no_vacuum=True) # FULL - backup_id = self.backup_node(backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=["--stream"]) - out = self.validate_pb(backup_dir, 'node', backup_id) + out = self.pb.validate('node', backup_id) self.assertIn( "INFO: Backup {0} is valid".format(backup_id), out) @@ -72,216 +68,152 @@ def test_full_backup_stream(self): # PGPRO-707 def test_backup_modes_archive(self): """standart backup modes with ARCHIVE WAL method""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir - full_backup_id = self.backup_node(backup_dir, 'node', node) - show_backup = self.show_pb(backup_dir, 'node')[0] + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + full_backup_id = self.pb.backup_node('node', node) + show_backup = self.pb.show('node')[0] self.assertEqual(show_backup['status'], "OK") self.assertEqual(show_backup['backup-mode'], "FULL") # postmaster.pid and postmaster.opts shouldn't be copied - excluded = True - db_dir = os.path.join( - backup_dir, "backups", 'node', full_backup_id, "database") - - for f in os.listdir(db_dir): - if ( - os.path.isfile(os.path.join(db_dir, f)) and - ( - f == "postmaster.pid" or - f == "postmaster.opts" - ) - ): - excluded = False - self.assertEqual(excluded, True) + pms = {"postmaster.pid", "postmaster.opts"} + files = self.get_backup_listdir(backup_dir, 'node', full_backup_id, + 'database') + self.assertFalse(pms.intersection(files)) + files = self.get_backup_filelist(backup_dir, 'node', full_backup_id) + self.assertFalse(pms.intersection(files.keys())) # page backup mode - page_backup_id = self.backup_node( - backup_dir, 
'node', node, backup_type="page") + page_backup_id = self.pb.backup_node('node', node, backup_type="page") - show_backup_1 = self.show_pb(backup_dir, 'node')[1] + show_backup_1 = self.pb.show('node')[1] self.assertEqual(show_backup_1['status'], "OK") self.assertEqual(show_backup_1['backup-mode'], "PAGE") # delta backup mode - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta") + delta_backup_id = self.pb.backup_node('node', node, backup_type="delta") - show_backup_2 = self.show_pb(backup_dir, 'node')[2] + show_backup_2 = self.pb.show('node')[2] self.assertEqual(show_backup_2['status'], "OK") self.assertEqual(show_backup_2['backup-mode'], "DELTA") # Check parent backup self.assertEqual( full_backup_id, - self.show_pb( - backup_dir, 'node', + self.pb.show('node', backup_id=show_backup_1['id'])["parent-backup-id"]) self.assertEqual( page_backup_id, - self.show_pb( - backup_dir, 'node', + self.pb.show('node', backup_id=show_backup_2['id'])["parent-backup-id"]) # @unittest.skip("skip") def test_smooth_checkpoint(self): """full backup with smooth checkpoint""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=["-C"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + self.assertEqual(self.pb.show('node')[0]['status'], "OK") node.stop() # @unittest.skip("skip") def test_incremental_backup_without_full(self): """page backup without validated full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type="page", + expect_error="because page backup should not be possible") + self.assertMessage(contains="WARNING: Valid full backup on current timeline 1 is not found") + self.assertMessage(contains="ERROR: Create new full backup before an incremental one") self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], "ERROR") # @unittest.skip("skip") def 
test_incremental_backup_corrupt_full(self): """page-level backup with corrupted full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) - file = os.path.join( - backup_dir, "backups", "node", backup_id, - "database", "postgresql.conf") - os.remove(file) + backup_id = self.pb.backup_node('node', node) + self.remove_backup_file(backup_dir, "node", backup_id, "database/postgresql.conf") - try: - self.validate_pb(backup_dir, 'node') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of validation of corrupted backup.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "INFO: Validate backups of the instance 'node'" in e.message and - "WARNING: Backup file" in e.message and "is not found" in e.message and - "WARNING: Backup {0} data files are corrupted".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.validate('node', + expect_error="because of validation of corrupted backup") + self.assertMessage(contains="INFO: Validate backups of the instance 'node'") + self.assertMessage(contains="WARNING: Validating ") + self.assertMessage(contains="No such file") + self.assertMessage(contains=f"WARNING: Backup {backup_id} data files are corrupted") + self.assertMessage(contains="WARNING: Some backups are not valid") + + self.pb.backup_node('node', node, backup_type="page", + expect_error="because page backup should not be possible") + self.assertMessage(contains="WARNING: Valid full backup on current timeline 1 is not found") + self.assertMessage(contains="ERROR: Create new full backup before an incremental one") self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT") + self.pb.show('node', backup_id)['status'], "CORRUPT") self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") + self.pb.show('node')[1]['status'], "ERROR") # @unittest.skip("skip") def test_delta_threads_stream(self): """delta multi thread backup mode and stream""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + 
set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - self.backup_node( - backup_dir, 'node', node, + self.assertEqual(self.pb.show('node')[0]['status'], "OK") + self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") + self.assertEqual(self.pb.show('node')[1]['status'], "OK") # @unittest.skip("skip") def test_page_detect_corruption(self): """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) node.safe_psql( @@ -303,42 +235,28 @@ def test_page_detect_corruption(self): f.seek(9000) f.write(b"bla") f.flush() - f.close - try: - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4", "--stream", "--log-level-file=VERBOSE"]) - self.assertEqual( - 1, 0, - "Expecting Error because data file is corrupted" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Corruption detected in file "{0}", ' - 'block 1: page verification failed, calculated checksum'.format(path), - e.message) + self.pb.backup_node('node', node, backup_type="full", + options=["-j", "4", "--stream", "--log-level-file=VERBOSE"], + expect_error="because data file is corrupted") + self.assertMessage(contains=f'ERROR: Corruption detected in file "{path}", ' + 'block 1: page verification failed, calculated checksum') self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'ERROR', "Backup Status should be ERROR") # @unittest.skip("skip") def test_backup_detect_corruption(self): """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() if self.ptrack: @@ -346,8 +264,7 @@ def test_backup_detect_corruption(self): "postgres", "create extension ptrack") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", 
"--stream"]) node.safe_psql( @@ -360,8 +277,7 @@ def test_backup_detect_corruption(self): "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) node.safe_psql( @@ -384,240 +300,31 @@ def test_backup_detect_corruption(self): node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="page", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - + modes = "full,delta,page" if self.ptrack: - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page verification failed, calculated checksum'.format( - heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + modes += ",ptrack" + for mode in modes.split(','): + with self.subTest(mode): + self.pb.backup_node('node', node, + backup_type=mode, + options=["-j", "4", "--stream"], + expect_error="because of block corruption") + self.assertMessage(contains= + 'ERROR: Corruption detected in file "{0}", block 1: ' + 'page verification failed, calculated checksum'.format( + heap_fullpath)) + sleep(1) # @unittest.skip("skip") def test_backup_detect_invalid_block_header(self): """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - 
set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - if self.ptrack: - node.safe_psql( - "postgres", - "create extension ptrack") - - node.safe_psql( - "postgres", - "create table t_heap as select 1 as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,10000) i") - - heap_path = node.safe_psql( - "postgres", - "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - - node.safe_psql( - "postgres", - "select count(*) from t_heap") - - node.safe_psql( - "postgres", - "update t_heap set id = id + 10000") - - node.stop() - - heap_fullpath = os.path.join(node.data_dir, heap_path) - with open(heap_fullpath, "rb+", 0) as f: - f.seek(8193) - f.write(b"blahblahblahblah") - f.flush() - f.close - - node.slow_start() - -# self.backup_node( -# backup_dir, 'node', node, -# backup_type="full", options=["-j", "4", "--stream"]) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="page", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - if self.ptrack: - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, 
pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # @unittest.skip("skip") - def test_backup_detect_missing_permissions(self): - """make node, corrupt some page, check that backup failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() if self.ptrack: @@ -635,8 +342,7 @@ def test_backup_detect_missing_permissions(self): "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) node.safe_psql( @@ -654,92 +360,21 @@ def test_backup_detect_missing_permissions(self): f.seek(8193) f.write(b"blahblahblahblah") f.flush() - f.close node.slow_start() -# self.backup_node( -# backup_dir, 'node', node, -# backup_type="full", options=["-j", "4", "--stream"]) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="full", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="delta", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="page", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - sleep(1) - + modes = "full,delta,page" if self.ptrack: - try: - self.backup_node( - backup_dir, 'node', node, - backup_type="ptrack", options=["-j", "4", "--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of block corruption" - "\n Output: {0} 
\n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Corruption detected in file "{0}", block 1: ' - 'page header invalid, pd_lower'.format(heap_fullpath), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + modes += ",ptrack" + for mode in modes.split(','): + with self.subTest(mode): + self.pb.backup_node('node', node, + backup_type=mode, options=["-j", "4", "--stream"], + expect_error="because of block corruption") + self.assertMessage(contains='ERROR: Corruption detected in file ' + f'"{heap_fullpath}", block 1: ' + 'page header invalid, pd_lower') + sleep(1) # @unittest.skip("skip") def test_backup_truncate_misaligned(self): @@ -747,15 +382,11 @@ def test_backup_truncate_misaligned(self): make node, truncate file to size not even to BLCKSIZE, take backup """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -781,8 +412,7 @@ def test_backup_truncate_misaligned(self): f.flush() f.close - output = self.backup_node( - backup_dir, 'node', node, backup_type="full", + output = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"], return_id=False) self.assertIn("WARNING: File", output) @@ -791,15 +421,13 @@ def test_backup_truncate_misaligned(self): # @unittest.skip("skip") def test_tablespace_in_pgdata_pgpro_1376(self): """PGPRO-1376 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + backup_dir = self.backup_dir - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node( @@ -828,8 +456,7 @@ def test_tablespace_in_pgdata_pgpro_1376(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,1000) i") - backup_id_1 = self.backup_node( - backup_dir, 'node', node, backup_type="full", + backup_id_1 = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) node.safe_psql( @@ -839,8 +466,7 @@ def test_tablespace_in_pgdata_pgpro_1376(self): "postgres", "drop tablespace tblspace2") - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) pgdata = self.pgdata_content(node.data_dir) @@ -871,8 +497,7 @@ def test_tablespace_in_pgdata_pgpro_1376(self): node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node.data_dir) @@ -886,19 +511,14 @@ def test_basic_tablespace_handling(self): check that restore with tablespace mapping will end with success """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - 
initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", + backup_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') @@ -932,32 +552,20 @@ def test_basic_tablespace_handling(self): tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new') tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new') - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace1_old_path, tblspace1_new_path), - "-T", "{0}={1}".format( - tblspace2_old_path, tblspace2_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} has no tablespaceses, ' - 'nothing to remap'.format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace1_old_path, tblspace1_new_path), + "-T", "{0}={1}".format( + tblspace2_old_path, tblspace2_new_path)], + expect_error="because tablespace mapping is incorrect") + self.assertMessage(contains= + f'ERROR: Backup {backup_id} has no tablespaceses, ' + 'nothing to remap') node.safe_psql( "postgres", @@ -967,12 +575,10 @@ def test_basic_tablespace_handling(self): "postgres", "drop tablespace some_lame_tablespace") - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -993,15 +599,11 @@ def test_tablespace_handling_1(self): make node with tablespace A, take full backup, check that restore with tablespace mapping of tablespace B will end with error """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') @@ -1013,33 +615,20 @@ def test_tablespace_handling_1(self): node, 'tblspace1', tblspc_path=tblspace1_old_path) - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) - node_restored = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace2_old_path, tblspace_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: --tablespace-mapping option' in e.message and - 'have an entry in tablespace_map file' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace2_old_path, tblspace_new_path)], + expect_error="because tablespace mapping is incorrect") + self.assertMessage(contains='ERROR: --tablespace-mapping option') + self.assertMessage(contains='have an entry in tablespace_map file') # @unittest.skip("skip") def test_tablespace_handling_2(self): @@ -1047,61 +636,40 @@ def test_tablespace_handling_2(self): make node without tablespaces, take full backup, check that restore with tablespace mapping will end with error """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old') tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", + backup_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace1_old_path, tblspace_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} has no tablespaceses, ' - 'nothing to remap'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace1_old_path, tblspace_new_path)], + expect_error="because tablespace mapping is incorrect") + self.assertMessage(contains=f'ERROR: Backup {backup_id} has no tablespaceses, ' + 'nothing to remap') # @unittest.skip("skip") + @needs_gdb def test_drop_rel_during_full_backup(self): """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() for i in range(1, 512): @@ -1128,9 +696,8 @@ def test_drop_rel_during_full_backup(self): absolute_path_2 = os.path.join(node.data_dir, relative_path_2) # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress'], + gdb = self.pb.backup_node('node', node, + options=['--stream', '--log-level-console=LOG', '--progress'], gdb=True) gdb.set_breakpoint('backup_files') @@ -1155,14 +722,13 @@ def test_drop_rel_during_full_backup(self): pgdata = self.pgdata_content(node.data_dir) - #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - # log_content = f.read() - # self.assertTrue( + #log_content = self.read_pb_log() + #self.assertTrue( # 'LOG: File "{0}" is not found'.format(absolute_path) in log_content, # 'File "{0}" should be deleted but it`s not'.format(absolute_path)) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # Physical comparison pgdata_restored = self.pgdata_content(node.data_dir) @@ -1171,14 +737,11 @@ def test_drop_rel_during_full_backup(self): @unittest.skip("skip") def test_drop_db_during_full_backup(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() for i in range(1, 2): @@ -1191,8 +754,7 @@ def test_drop_db_during_full_backup(self): "VACUUM") # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, + gdb = self.pb.backup_node('node', node, gdb=True, options=[ '--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress']) @@ -1219,33 +781,28 @@ def test_drop_db_during_full_backup(self): pgdata = self.pgdata_content(node.data_dir) - #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - # log_content = f.read() - # self.assertTrue( + #log_content = self.read_pb_log() + #self.assertTrue( # 'LOG: File "{0}" is not found'.format(absolute_path) in log_content, # 'File "{0}" should be deleted but it`s not'.format(absolute_path)) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # Physical comparison pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_drop_rel_during_backup_delta(self): """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + 
self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=10) @@ -1262,11 +819,10 @@ def test_drop_rel_during_backup_delta(self): absolute_path = os.path.join(node.data_dir, relative_path) # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # DELTA backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='delta', + gdb = self.pb.backup_node('node', node, backup_type='delta', gdb=True, options=['--log-level-file=LOG']) gdb.set_breakpoint('backup_files') @@ -1286,33 +842,30 @@ def test_drop_rel_during_backup_delta(self): pgdata = self.pgdata_content(node.data_dir) - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - self.assertTrue( + log_content = self.read_pb_log() + self.assertTrue( 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, 'File "{0}" should be deleted but it`s not'.format(absolute_path)) node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) # Physical comparison pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_drop_rel_during_backup_page(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1327,7 +880,7 @@ def test_drop_rel_during_backup_page(self): absolute_path = os.path.join(node.data_dir, relative_path) # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( "postgres", @@ -1335,8 +888,7 @@ def test_drop_rel_during_backup_page(self): " as id from generate_series(101,102) i") # PAGE backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='page', + gdb = self.pb.backup_node('node', node, backup_type='page', gdb=True, options=['--log-level-file=LOG']) gdb.set_breakpoint('backup_files') @@ -1351,13 +903,13 @@ def test_drop_rel_during_backup_page(self): pgdata = self.pgdata_content(node.data_dir) - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + backup_id = self.pb.show('node')[1]['id'] filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) self.assertNotIn(relative_path, filelist) node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) # Physical comparison pgdata_restored = self.pgdata_content(node.data_dir) @@ -1366,82 +918,76 @@ def test_drop_rel_during_backup_page(self): # @unittest.skip("skip") def test_persistent_slot_for_stream_backup(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', 
set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '40MB'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "SELECT pg_create_physical_replication_slot('slot_1')") + # FULL backup. By default, --temp-slot=true. + self.pb.backup_node('node', node, + options=['--stream', '--slot=slot_1'], + expect_error="because replication slot already exist") + self.assertMessage(contains='ERROR: replication slot "slot_1" already exists') + # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--slot=slot_1']) + self.pb.backup_node('node', node, + options=['--stream', '--slot=slot_1', '--temp-slot=false']) # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--slot=slot_1']) + self.pb.backup_node('node', node, + options=['--stream', '--slot=slot_1', '--temp-slot=false']) # @unittest.skip("skip") def test_basic_temp_slot_for_stream_backup(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) - if self.get_version(node) < self.version_to_num('10.0'): - self.skipTest('You need PostgreSQL >= 10 for this test') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['--stream', '--temp-slot']) + # FULL backup. By default, --temp-slot=true. 
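+        # Unlike test_persistent_slot_for_stream_backup above, no persistent
+        # 'slot_1' has been created beforehand, so relying on the default
+        # temporary slot under that name is expected to succeed.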
+ self.pb.backup_node('node', node, + options=['--stream', '--slot=slot_1']) + # FULL backup - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--slot=slot_1', '--temp-slot']) + self.pb.backup_node('node', node, + options=['--stream', '--slot=slot_1', '--temp-slot=true']) # @unittest.skip("skip") + @needs_gdb def test_backup_concurrent_drop_table(self): """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=1) # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, + gdb = self.pb.backup_node('node', node, options=['--stream', '--compress'], gdb=True) @@ -1457,217 +1003,191 @@ def test_backup_concurrent_drop_table(self): 'postgres', 'CHECKPOINT') - gdb.remove_all_breakpoints() gdb.continue_execution_until_exit() gdb.kill() - show_backup = self.show_pb(backup_dir, 'node')[0] + show_backup = self.pb.show('node')[0] self.assertEqual(show_backup['status'], "OK") - # @unittest.skip("skip") def test_pg_11_adjusted_wal_segment_size(self): """""" if self.pg_config_version < self.version_to_num('11.0'): self.skipTest('You need PostgreSQL >= 11 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=[ - '--data-checksums', - '--wal-segsize=64'], + initdb_params=['--wal-segsize=64'], pg_options={ 'min_wal_size': '128MB'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=5) # FULL STREAM backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgbench = node.pgbench(options=['-T', '5', '-c', '2']) pgbench.wait() # PAGE STREAM backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', options=['--stream']) pgbench = node.pgbench(options=['-T', '5', '-c', '2']) pgbench.wait() # DELTA STREAM backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgbench = node.pgbench(options=['-T', '5', '-c', '2']) pgbench.wait() # FULL ARCHIVE backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '5', '-c', '2']) pgbench.wait() # PAGE ARCHIVE backup - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-T', '5', '-c', '2']) pgbench.wait() # DELTA ARCHIVE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') pgdata = 
self.pgdata_content(node.data_dir) # delete - output = self.delete_pb( - backup_dir, 'node', + output = self.pb.delete('node', options=[ '--expired', '--delete-wal', '--retention-redundancy=1']) # validate - self.validate_pb(backup_dir) + self.pb.validate() # merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # restore node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=backup_id) + self.pb.restore_node('node', node, backup_id=backup_id) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_sigint_handling(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, + gdb = self.pb.backup_node('node', node, gdb=True, options=['--stream', '--log-level-file=LOG']) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() + gdb.continue_execution_until_break(200) - gdb.continue_execution_until_break(20) gdb.remove_all_breakpoints() - - gdb._execute('signal SIGINT') + gdb.signal('SIGINT') gdb.continue_execution_until_error() gdb.kill() - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + backup_id = self.pb.show('node')[0]['id'] self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") + @needs_gdb def test_sigterm_handling(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, + gdb = self.pb.backup_node('node', node, gdb=True, options=['--stream', '--log-level-file=LOG']) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() + gdb.continue_execution_until_break(200) - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - - gdb._execute('signal SIGTERM') + gdb.signal('SIGTERM') gdb.continue_execution_until_error() - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + backup_id = self.pb.show('node')[0]['id'] self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") + @needs_gdb def test_sigquit_handling(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 
'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, options=['--stream']) + gdb = self.pb.backup_node('node', node, gdb=True, options=['--stream']) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() + gdb.continue_execution_until_break(200) - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - - gdb._execute('signal SIGQUIT') + gdb.signal('SIGQUIT') gdb.continue_execution_until_error() - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + backup_id = self.pb.show('node')[0]['id'] self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") def test_drop_table(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() connect_1 = node.connect("postgres") @@ -1685,8 +1205,7 @@ def test_drop_table(self): connect_2.commit() # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # @unittest.skip("skip") def test_basic_missing_file_permissions(self): @@ -1694,14 +1213,12 @@ def test_basic_missing_file_permissions(self): if os.name == 'nt': self.skipTest('Skipped because it is POSIX only test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() relative_path = node.safe_psql( @@ -1712,22 +1229,10 @@ def test_basic_missing_file_permissions(self): os.chmod(full_path, 000) - try: - # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Cannot open file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + # FULL backup + self.pb.backup_node('node', node, options=['--stream'], + expect_error="because of missing permissions") + self.assertMessage(regex=r"ERROR: [^\n]*: Permission denied") os.chmod(full_path, 700) @@ -1737,53 +1242,37 @@ def test_basic_missing_dir_permissions(self): if os.name == 'nt': self.skipTest('Skipped because it is POSIX only test') - backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() full_path = os.path.join(node.data_dir, 'pg_twophase') os.chmod(full_path, 000) - try: # FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Cannot open directory', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, options=['--stream'], + expect_error="because of missing permissions") + self.assertMessage(regex=r'ERROR:[^\n]*Cannot open dir') os.rmdir(full_path) # @unittest.skip("skip") def test_backup_with_least_privileges_role(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], - pg_options={'archive_timeout': '30s'}) + pg_options={'archive_timeout': '10s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1796,81 +1285,7 @@ def test_backup_with_least_privileges_role(self): "CREATE SCHEMA ptrack; " "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT 
EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + if self.pg_config_version < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -1956,43 +1371,35 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; " "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;") - 
if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, datname='backupdb', options=['--stream', '-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, datname='backupdb', options=['-U', 'backup']) # PAGE - self.backup_node( - backup_dir, 'node', node, backup_type='page', + self.pb.backup_node('node', node, backup_type='page', datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='page', datname='backupdb', + self.pb.backup_node('node', node, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', datname='backupdb', options=['--stream', '-U', 'backup']) # PTRACK if self.ptrack: - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) # @unittest.skip("skip") @@ -2003,38 +1410,31 @@ def test_parent_choosing(self): PAGE1 <- CORRUPT FULL """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # PAGE1 - page1_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page1_id = self.pb.backup_node('node', node, backup_type='page') # PAGE2 - page2_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page2_id = self.pb.backup_node('node', node, backup_type='page') # Change PAGE1 to ERROR self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR') # PAGE3 - page3_id = self.backup_node( - backup_dir, 'node', node, + page3_id = self.pb.backup_node('node', node, backup_type='page', options=['--log-level-file=LOG']) - log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file_path) as f: - log_file_content = f.read() + log_file_content = self.read_pb_log() self.assertIn( "WARNING: Backup {0} has invalid parent: {1}. 
" @@ -2051,8 +1451,7 @@ def test_parent_choosing(self): log_file_content) self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], + self.pb.show('node', backup_id=page3_id)['parent-backup-id'], full_id) # @unittest.skip("skip") @@ -2063,39 +1462,31 @@ def test_parent_choosing_1(self): PAGE1 <- (missing) FULL """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # PAGE1 - page1_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page1_id = self.pb.backup_node('node', node, backup_type='page') # PAGE2 - page2_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page2_id = self.pb.backup_node('node', node, backup_type='page') # Delete PAGE1 - shutil.rmtree( - os.path.join(backup_dir, 'backups', 'node', page1_id)) + self.remove_one_backup(backup_dir, 'node', page1_id) # PAGE3 - page3_id = self.backup_node( - backup_dir, 'node', node, + page3_id = self.pb.backup_node('node', node, backup_type='page', options=['--log-level-file=LOG']) - log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file_path) as f: - log_file_content = f.read() + log_file_content = self.read_pb_log() self.assertIn( "WARNING: Backup {0} has missing parent: {1}. 
" @@ -2107,8 +1498,7 @@ def test_parent_choosing_1(self): log_file_content) self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], + self.pb.show('node', backup_id=page3_id)['parent-backup-id'], full_id) # @unittest.skip("skip") @@ -2119,76 +1509,57 @@ def test_parent_choosing_2(self): PAGE1 <- OK FULL <- (missing) """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # PAGE1 - page1_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page1_id = self.pb.backup_node('node', node, backup_type='page') # PAGE2 - page2_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page2_id = self.pb.backup_node('node', node, backup_type='page') # Delete FULL - shutil.rmtree( - os.path.join(backup_dir, 'backups', 'node', full_id)) + self.remove_one_backup(backup_dir, 'node', full_id) # PAGE3 - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--log-level-file=LOG']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Valid full backup on current timeline 1 is not found' in e.message and - 'ERROR: Create new full backup before an incremental one' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + backup_type='page', options=['--log-level-file=LOG'], + expect_error="because FULL backup is missing") + self.assertMessage(contains='WARNING: Valid full backup on current timeline 1 is not found') + self.assertMessage(contains='ERROR: Create new full backup before an incremental one') self.assertEqual( - self.show_pb( - backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'ERROR') # @unittest.skip("skip") + @needs_gdb def test_backup_with_less_privileges_role(self): """ check permissions correctness from documentation: https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], pg_options={ - 'archive_timeout': '30s', + 'archive_timeout': '10s', 'archive_mode': 'always', - 'checkpoint_timeout': '60s', + 'checkpoint_timeout': '30s', 'wal_level': 'logical'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + 
self.pb.set_config('node', options=['--archive-timeout=30s']) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -2200,43 +1571,10 @@ def test_backup_with_less_privileges_role(self): 'backupdb', 'CREATE EXTENSION ptrack') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + if self.pg_config_version < 150000: node.safe_psql( 'backupdb', + "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2277,106 +1615,84 @@ def test_backup_with_less_privileges_role(self): 'ALTER ROLE backup WITH REPLICATION;') # FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, datname='backupdb', options=['--stream', '-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, datname='backupdb', options=['-U', 'backup']) # PAGE - self.backup_node( - backup_dir, 'node', node, backup_type='page', + self.pb.backup_node('node', node, backup_type='page', datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='page', datname='backupdb', + self.pb.backup_node('node', node, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', datname='backupdb', 
options=['--stream', '-U', 'backup']) # PTRACK if self.ptrack: - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - if self.get_version(node) < 90600: - return - # Restore as replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) self.set_replica(node, replica) - self.add_instance(backup_dir, 'replica', replica) - self.set_config( - backup_dir, 'replica', - options=['--archive-timeout=120s', '--log-level-console=LOG']) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - self.set_auto_conf(replica, {'hot_standby': 'on'}) + self.pb.add_instance('replica', replica) + self.pb.set_config('replica', + options=['--archive-timeout=60s', '--log-level-console=LOG']) + self.pb.set_archiving('replica', replica, replica=True) + replica.set_auto_conf({'hot_standby': 'on'}) # freeze bgwriter to get rid of RUNNING XACTS records # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0] # gdb_checkpointer = self.gdb_attach(bgwriter_pid) - copy_tree( - os.path.join(backup_dir, 'wal', 'node'), - os.path.join(backup_dir, 'wal', 'replica')) - replica.slow_start(replica=True) - # self.switch_wal_segment(node) - # self.switch_wal_segment(node) + # make sure replica will archive wal segment with backup start point + lsn = self.switch_wal_segment(node, and_tx=True) + self.wait_until_lsn_replayed(replica, lsn) + replica.execute('CHECKPOINT') + replica.poll_query_until(f"select redo_lsn >= '{lsn}' from pg_control_checkpoint()") - self.backup_node( - backup_dir, 'replica', replica, - datname='backupdb', options=['-U', 'backup']) + self.pb.backup_replica_node('replica', replica, master=node, + datname='backupdb', options=['-U', 'backup']) # stream full backup from replica - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, datname='backupdb', options=['--stream', '-U', 'backup']) # self.switch_wal_segment(node) # PAGE backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='page', - datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) + self.pb.backup_replica_node('replica', replica, master=node, + backup_type='page', datname='backupdb', + options=['-U', 'backup']) - self.backup_node( - backup_dir, 'replica', replica, backup_type='page', + self.pb.backup_node('replica', replica, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', + self.pb.backup_replica_node('replica', replica, master=node, + backup_type='delta', datname='backupdb', + options=['-U', 'backup']) + self.pb.backup_node('replica', replica, backup_type='delta', datname='backupdb', options=['--stream', '-U', 'backup']) # PTRACK backup from replica if self.ptrack: - self.switch_wal_segment(node) - self.backup_node( - 
backup_dir, 'replica', replica, backup_type='ptrack', - datname='backupdb', options=['-U', 'backup']) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', + self.pb.backup_replica_node('replica', replica, master=node, + backup_type='ptrack', datname='backupdb', + options=['-U', 'backup']) + self.pb.backup_node('replica', replica, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) @unittest.skip("skip") @@ -2384,14 +1700,12 @@ def test_issue_132(self): """ https://github.com/postgrespro/pg_probackup/issues/132 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() with node.connect("postgres") as conn: @@ -2400,13 +1714,12 @@ def test_issue_132(self): "CREATE TABLE t_{0} as select 1".format(i)) conn.commit() - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -2418,16 +1731,14 @@ def test_issue_132_1(self): """ https://github.com/postgrespro/pg_probackup/issues/132 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1 - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() with node.connect("postgres") as conn: @@ -2436,101 +1747,55 @@ def test_issue_132_1(self): "CREATE TABLE t_{0} as select 1".format(i)) conn.commit() - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream'], old_binary=True) + full_id = self.pb.backup_node('node', node, options=['--stream'], old_binary=True) - delta_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', + delta_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream'], old_binary=True) node.cleanup() # make sure that new binary can detect corruption - try: - self.validate_pb(backup_dir, 'node', backup_id=full_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is CORRUPT" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir, 'node', backup_id=delta_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is CORRUPT" - 
"\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', backup_id=full_id, + expect_error="because FULL backup is CORRUPT") + self.assertMessage(contains= + f'WARNING: Backup {full_id} is a victim of metadata corruption') + + self.pb.validate('node', backup_id=delta_id, + expect_error="because FULL backup is CORRUPT") + self.assertMessage(contains= + f'WARNING: Backup {full_id} is a victim of metadata corruption') self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], + 'CORRUPT', self.pb.show('node', full_id)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'], + 'ORPHAN', self.pb.show('node', delta_id)['status'], 'Backup STATUS should be "ORPHAN"') # check that revalidation is working correctly - try: - self.restore_node( - backup_dir, 'node', node, backup_id=delta_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because FULL backup is CORRUPT" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', backup_id=delta_id, + expect_error="because FULL backup is CORRUPT") + self.assertMessage(contains= + f'WARNING: Backup {full_id} is a victim of metadata corruption') self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], + 'CORRUPT', self.pb.show('node', full_id)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'], + 'ORPHAN', self.pb.show('node', delta_id)['status'], 'Backup STATUS should be "ORPHAN"') # check that '--no-validate' do not allow to restore ORPHAN backup -# try: -# self.restore_node( -# backup_dir, 'node', node, backup_id=delta_id, -# options=['--no-validate']) -# # we should die here because exception is what we expect to happen -# self.assertEqual( -# 1, 0, -# "Expecting Error because FULL backup is CORRUPT" -# "\n Output: {0} \n CMD: {1}".format( -# repr(self.output), self.cmd)) -# except ProbackupException as e: -# self.assertIn( -# 'Insert data', -# e.message, -# '\n Unexpected Error Message: {0}\n CMD: {1}'.format( -# repr(e.message), self.cmd)) +# self.pb.restore_node('node', node=node, backup_id=delta_id, +# options=['--no-validate'], +# expect_error="because FULL backup is CORRUPT") +# self.assertMessage(contains='Insert data') node.cleanup() - output = self.restore_node( - backup_dir, 'node', node, backup_id=full_id, options=['--force']) + output = self.pb.restore_node('node', node, backup_id=full_id, options=['--force']) self.assertIn( 'WARNING: Backup {0} has status: CORRUPT'.format(full_id), @@ -2550,8 +1815,7 @@ def test_issue_132_1(self): node.cleanup() - output = self.restore_node( - backup_dir, 'node', node, backup_id=delta_id, options=['--force']) + output = self.pb.restore_node('node', node, backup_id=delta_id, options=['--force']) self.assertIn( 'WARNING: Backup {0} is orphan.'.format(delta_id), @@ -2573,31 +1837,28 @@ def test_note_sanity(self): """ 
test that adding note to backup works as expected """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=['--stream', '--log-level-file=LOG', '--note=test_note']) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') - print(self.show_pb(backup_dir, as_text=True, as_json=True)) + print(self.pb.show(as_text=True, as_json=True)) self.assertEqual(show_backups[0]['note'], "test_note") - self.set_backup(backup_dir, 'node', backup_id, options=['--note=none']) + self.pb.set_backup('node', backup_id, options=['--note=none']) - backup_meta = self.show_pb(backup_dir, 'node', backup_id) + backup_meta = self.pb.show('node', backup_id) self.assertNotIn( 'note', @@ -2606,151 +1867,96 @@ def test_note_sanity(self): # @unittest.skip("skip") def test_parent_backup_made_by_newer_version(self): """incremental backup with parent made by newer version""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + node = self.pg_node.make_simple('node') - backup_id = self.backup_node(backup_dir, 'node', node) + backup_dir = self.backup_dir - control_file = os.path.join( - backup_dir, "backups", "node", backup_id, - "backup.control") + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + backup_id = self.pb.backup_node('node', node) version = self.probackup_version fake_new_version = str(int(version.split('.')[0]) + 1) + '.0.0' - with open(control_file, 'r') as f: - data = f.read(); - - data = data.replace(version, fake_new_version) + with self.modify_backup_control(backup_dir, "node", backup_id) as cf: + cf.data = cf.data.replace(version, fake_new_version) - with open(control_file, 'w') as f: - f.write(data); - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental backup should not be possible " - "if parent made by newer version.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( + self.pb.backup_node('node', node, backup_type="page", + expect_error="because incremental backup should not be possible") + self.assertMessage(contains= "pg_probackup do not guarantee to be forward compatible. 
" - "Please upgrade pg_probackup binary.", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + "Please upgrade pg_probackup binary.") self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") + self.pb.show('node')[1]['status'], "ERROR") # @unittest.skip("skip") def test_issue_289(self): """ https://github.com/postgrespro/pg_probackup/issues/289 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--archive-timeout=10s']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because full backup is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertNotIn( - "INFO: Wait for WAL segment", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "ERROR: Create new full backup before an incremental one", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + backup_type='page', options=['--archive-timeout=10s'], + expect_error="because full backup is missing") + self.assertMessage(has_no="INFO: Wait for WAL segment") + self.assertMessage(contains="ERROR: Create new full backup before an incremental one") self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") + self.pb.show('node')[0]['status'], "ERROR") # @unittest.skip("skip") def test_issue_290(self): """ + For archive backup make sure that archive dir exists. 
+ https://github.com/postgrespro/pg_probackup/issues/290 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + if not backup_dir.is_file_based: + self.skipTest("directories are not implemented on cloud storage") - os.rmdir( - os.path.join(backup_dir, "wal", "node")) + node = self.pg_node.make_simple('node') - node.slow_start() + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) - try: - self.backup_node( - backup_dir, 'node', node, - options=['--archive-timeout=10s']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because full backup is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertNotIn( - "INFO: Wait for WAL segment", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.remove_instance_waldir(backup_dir, 'node') - self.assertIn( - "WAL archive directory is not accessible", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + node.slow_start() + + self.pb.backup_node('node', node, + options=['--archive-timeout=10s'], + expect_error="because full backup is missing") + self.assertMessage(has_no="INFO: Wait for WAL segment") + self.assertMessage(contains="WAL archive directory is not accessible") self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") + self.pb.show('node')[0]['status'], "ERROR") @unittest.skip("skip") def test_issue_203(self): """ https://github.com/postgrespro/pg_probackup/issues/203 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() with node.connect("postgres") as conn: @@ -2759,17 +1965,14 @@ def test_issue_203(self): "CREATE TABLE t_{0} as select 1".format(i)) conn.commit() - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream', '-j2']) + full_id = self.pb.backup_node('node', node, options=['--stream', '-j2']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', - node_restored, data_dir=node_restored.data_dir) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -2777,22 +1980,22 @@ def test_issue_203(self): # @unittest.skip("skip") def test_issue_231(self): """ + Backups get the same ID if they are created within the same second. 
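+        The two (failing) backup attempts below must still receive distinct
+        backup IDs.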
https://github.com/postgrespro/pg_probackup/issues/231 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) datadir = os.path.join(node.data_dir, '123') t0 = time() while True: - with self.assertRaises(ProbackupException) as ctx: - self.backup_node(backup_dir, 'node', node) - pb1 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0] + output = self.pb.backup_node('node', node, + expect_error=True) + pb1 = re.search(r' backup ID: ([^\s,]+),', output).groups()[0] t = time() if int(pb1, 36) == int(t) and t % 1 < 0.5: @@ -2805,9 +2008,8 @@ def test_issue_231(self): # sleep to the second's end so backup will not sleep for a second. sleep(1 - t % 1) - with self.assertRaises(ProbackupException) as ctx: - self.backup_node(backup_dir, 'node', node) - pb2 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0] + output = self.pb.backup_node('node', node, expect_error=True) + pb2 = re.search(r' backup ID: ([^\s,]+),', output).groups()[0] self.assertNotEqual(pb1, pb2) @@ -2815,43 +2017,38 @@ def test_incr_backup_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - initdb_params=['--data-checksums']) + node1 = self.pg_node.make_simple('node1') node1.cleanup() node.pgbench_init(scale=5) # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, options=['-T', '10', '-c', '1']) - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') node.safe_psql( 'postgres', 'reindex index pg_type_oid_index') - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') # incremental restore into node1 node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() node.safe_psql( @@ -2859,21 +2056,19 @@ def test_incr_backup_filenode_map(self): 'select 1') # @unittest.skip("skip") + @needs_gdb def test_missing_wal_segment(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], pg_options={'archive_timeout': '30s'}) - 
self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=10) @@ -2891,8 +2086,7 @@ def test_missing_wal_segment(self): pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog') # Full backup in streaming mode - gdb = self.backup_node( - backup_dir, 'node', node, datname='backupdb', + gdb = self.pb.backup_node('node', node, datname='backupdb', options=['--stream', '--log-level-file=INFO'], gdb=True) # break at streaming start @@ -2934,26 +2128,23 @@ def test_missing_wal_segment(self): # @unittest.skip("skip") def test_missing_replication_permission(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) -# self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) +# self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) # Settings for Replica self.set_replica(node, replica) @@ -2963,50 +2154,7 @@ def test_missing_replication_permission(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and 
ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + if self.pg_config_version < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -3052,7 +2200,7 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " @@ -3061,58 +2209,40 @@ def test_missing_replication_permission(self): sleep(2) replica.promote() - # Delta backup - try: - self.backup_node( - backup_dir, 'node', replica, backup_type='delta', - data_dir=replica.data_dir, datname='backupdb', options=['--stream', '-U', 'backup']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - # 9.5: ERROR: must be superuser or replication role to run a backup - # >=9.6: FATAL: must be superuser or replication role to start walsender - if self.pg_config_version < 160000: - self.assertRegex( - e.message, - "ERROR: must be superuser or replication role to run a backup|" - "FATAL: must be superuser or replication role to start walsender", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - else: - self.assertRegex( - e.message, - "FATAL: permission denied to start WAL sender\n" - "DETAIL: Only roles with the REPLICATION", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # Delta backup + self.pb.backup_node('node', replica, backup_type='delta', + data_dir=replica.data_dir, datname='backupdb', + options=['--stream', '-U', 'backup'], + expect_error="because incremental backup should not be possible") + + if self.pg_config_version < 160000: + self.assertMessage( + contains=r"FATAL: must be superuser or replication role to start walsender") + else: + self.assertMessage( + contains="FATAL: permission denied to start WAL sender\n" + "DETAIL: Only roles with the REPLICATION") # @unittest.skip("skip") def test_missing_replication_permission_1(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir 
+ node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) # Settings for Replica self.set_replica(node, replica) @@ -3122,51 +2252,8 @@ def test_missing_replication_permission_1(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE 
ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + if self.pg_config_version >= 100000 and self.pg_config_version < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -3212,7 +2299,7 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " @@ -3221,11 +2308,10 @@ def test_missing_replication_permission_1(self): replica.promote() # PAGE - output = self.backup_node( - backup_dir, 'node', replica, backup_type='page', + output = self.pb.backup_node('node', replica, backup_type='page', data_dir=replica.data_dir, datname='backupdb', options=['-U', 'backup'], return_id=False) - + self.assertIn( 'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines', output) @@ -3237,7 +2323,6 @@ def test_missing_replication_permission_1(self): # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' # OS-dependant messages: # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL: must be superuser or replication role to start walsender' - if self.pg_config_version < 160000: self.assertRegex( output, @@ -3252,69 +2337,50 @@ def test_missing_replication_permission_1(self): # @unittest.skip("skip") def test_basic_backup_default_transaction_read_only(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'default_transaction_read_only': 'on'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - try: - node.safe_psql( - 'postgres', - 'create temp table t1()') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except QueryException as e: - self.assertIn( - "cannot execute CREATE TABLE in a read-only transaction", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + error_result = node.safe_psql('postgres', + 'create temp table t1()', expect_error=True) + + self.assertMessage(error_result, contains="cannot execute CREATE TABLE in a read-only transaction") # FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['--stream']) # DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) # PAGE backup - 
self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # @unittest.skip("skip") + @needs_gdb def test_backup_atexit(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=5) # Full backup in streaming mode - gdb = self.backup_node( - backup_dir, 'node', node, + gdb = self.pb.backup_node('node', node, options=['--stream', '--log-level-file=VERBOSE'], gdb=True) # break at streaming start @@ -3322,61 +2388,54 @@ def test_backup_atexit(self): gdb.run_until_break() gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') - sleep(1) + gdb.signal('SIGINT') - self.assertEqual( - self.show_pb( - backup_dir, 'node')[0]['status'], 'ERROR') + timeout = 60 + status = self.pb.show('node')[0]['status'] + while status == 'RUNNING' or timeout > 0: + sleep(1) + timeout = timeout - 1 + status = self.pb.show('node')[0]['status'] - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - #print(log_content) - self.assertIn( - 'WARNING: A backup is in progress, stopping it.', + self.assertEqual(status, 'ERROR') + + log_content = self.read_pb_log() + + self.assertIn( + 'WARNING: A backup is in progress, stopping it', log_content) - if self.get_version(node) < 150000: - self.assertIn( + if self.pg_config_version < 150000: + self.assertIn( 'FROM pg_catalog.pg_stop_backup', log_content) - else: - self.assertIn( + else: + self.assertIn( 'FROM pg_catalog.pg_backup_stop', log_content) - self.assertIn( + self.assertIn( 'setting its status to ERROR', log_content) # @unittest.skip("skip") def test_pg_stop_backup_missing_permissions(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=5) self.simple_bootstrap(node, 'backup') - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - elif self.get_version(node) < 150000: + if self.pg_config_version < 150000: node.safe_psql( 'postgres', 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') @@ -3385,111 +2444,66 @@ def test_pg_stop_backup_missing_permissions(self): 'postgres', 'REVOKE EXECUTE 
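Editorial note, a sketch only: the status wait in test_backup_atexit above is essentially "poll show() while the backup is still RUNNING, but give up after a deadline". A generic helper makes that intent explicit; show_status below stands in for self.pb.show('node')[0]['status'] and is an assumption of this example, not part of the framework.

import time
from typing import Callable

def wait_for_status(show_status: Callable[[], str],
                    running_status: str = 'RUNNING',
                    timeout: float = 60.0,
                    poll_interval: float = 1.0) -> str:
    """Poll until the backup leaves `running_status` or the timeout expires."""
    deadline = time.monotonic() + timeout
    status = show_status()
    # 'and' stops the loop as soon as the status changes,
    # rather than always sleeping for the full timeout.
    while status == running_status and time.monotonic() < deadline:
        time.sleep(poll_interval)
        status = show_status()
    return status

# usage in the test would look roughly like:
#   status = wait_for_status(lambda: self.pb.show('node')[0]['status'])
#   self.assertEqual(status, 'ERROR')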
ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup') - + if self.pg_config_version < 150000: + stop_backup = "pg_stop_backup" + else: + stop_backup = "pg_backup_stop" # Full backup in streaming mode - try: - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '-U', 'backup']) - # we should die here because exception is what we expect to happen - if self.get_version(node) < 150000: - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions on pg_stop_backup " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - else: - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions on pg_backup_stop " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - if self.get_version(node) < 150000: - self.assertIn( - "ERROR: permission denied for function pg_stop_backup", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - else: - self.assertIn( - "ERROR: permission denied for function pg_backup_stop", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "query was: SELECT pg_catalog.txid_snapshot_xmax", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=['--stream', '-U', 'backup'], + expect_error=f"because of missing permissions on {stop_backup}") + self.assertMessage(contains=f"ERROR: permission denied for function {stop_backup}") + self.assertMessage(contains="query was: SELECT pg_catalog.txid_snapshot_xmax") # @unittest.skip("skip") def test_start_time(self): """Test, that option --start-time allows to set backup_id and restore""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--stream', '--start-time={0}'.format(str(startTime))]) + startTimeFull = int(time()) + self.pb.backup_node('node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTimeFull)]) # restore FULL backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'), - backup_id=base36enc(startTime)) + # cleanup it if we have leftover from a failed test + node_restored_full = self.pg_node.make_empty('node_restored_full') + self.pb.restore_node('node', node_restored_full, + backup_id=base36enc(startTimeFull)) #FULL backup with incorrect start time - try: - startTime = str(int(time()-100000)) - self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--stream', '--start-time={0}'.format(startTime)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - 'Expecting Error because start time for new backup must be newer ' - '\n Output: 
{0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + startTime = startTimeFull-100000 + self.pb.backup_node('node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)], + expect_error="because start time for new backup must be newer") + self.assertMessage( + regex=r"ERROR: Can't assign backup_id from requested start_time " + r"\(\w*\), this time must be later that backup \w*\n") # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + startTime = max(int(time()), startTimeFull+1) + self.pb.backup_node('node', node, backup_type='delta', options=['--stream', '--start-time={0}'.format(str(startTime))]) # restore DELTA backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'), + node_restored_delta = self.pg_node.make_empty('node_restored_delta') + self.pb.restore_node('node', node_restored_delta, backup_id=base36enc(startTime)) # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='page', + startTime = max(int(time()), startTime+1) + self.pb.backup_node('node', node, backup_type='page', options=['--stream', '--start-time={0}'.format(str(startTime))]) # restore PAGE backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'), - backup_id=base36enc(startTime)) + node_restored_page = self.pg_node.make_empty('node_restored_page') + self.pb.restore_node('node', node=node_restored_page, + backup_id=base36enc(startTime)) # PTRACK backup if self.ptrack: @@ -3497,77 +2511,63 @@ def test_start_time(self): 'postgres', 'create extension ptrack') - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + startTime = max(int(time()), startTime+1) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream', '--start-time={0}'.format(str(startTime))]) # restore PTRACK backup by backup_id calculated from start-time - self.restore_node( - backup_dir, 'node', - data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'), + node_restored_ptrack = self.pg_node.make_empty('node_restored_ptrack') + self.pb.restore_node('node', node_restored_ptrack, backup_id=base36enc(startTime)) # @unittest.skip("skip") def test_start_time_few_nodes(self): """Test, that we can synchronize backup_id's for different DBs""" - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), + node1 = self.pg_node.make_simple('node1', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1') - self.init_pb(backup_dir1) - self.add_instance(backup_dir1, 'node1', node1) - self.set_archiving(backup_dir1, 'node1', node1) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node1', node1) + self.pb.set_archiving('node1', node1) node1.slow_start() - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
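Editorial note, a tiny illustration only: the max(int(time()), <previous start>+1) pattern used for the DELTA, PAGE and PTRACK backups above guarantees that each --start-time is strictly greater than the previous one, so the derived backup IDs stay distinct even when several backups run within the same second.

from time import time

def next_start_time(prev_start: int) -> int:
    # Take the current second, but never reuse or go below the previous value.
    return max(int(time()), prev_start + 1)

t1 = int(time())
t2 = next_start_time(t1)
t3 = next_start_time(t2)
assert t1 < t2 < t3   # strictly increasing, hence distinct backup IDs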
self.fname, 'node2'), + node2 = self.pg_node.make_simple('node2', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2') - self.init_pb(backup_dir2) - self.add_instance(backup_dir2, 'node2', node2) - self.set_archiving(backup_dir2, 'node2', node2) + self.pb.add_instance('node2', node2) + self.pb.set_archiving('node2', node2) node2.slow_start() # FULL backup - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='full', + startTime = int(time()) + self.pb.backup_node('node1', node1, backup_type='full', options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='full', + self.pb.backup_node('node2', node2, backup_type='full', options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[0] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + show_backup1 = self.pb.show('node1')[0] + show_backup2 = self.pb.show('node2')[0] self.assertEqual(show_backup1['id'], show_backup2['id']) # DELTA backup - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='delta', + startTime = max(int(time()), startTime+1) + self.pb.backup_node('node1', node1, backup_type='delta', options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='delta', + self.pb.backup_node('node2', node2, backup_type='delta', options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[1] - show_backup2 = self.show_pb(backup_dir2, 'node2')[1] + show_backup1 = self.pb.show('node1')[1] + show_backup2 = self.pb.show('node2')[1] self.assertEqual(show_backup1['id'], show_backup2['id']) # PAGE backup - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='page', + startTime = max(int(time()), startTime+1) + self.pb.backup_node('node1', node1, backup_type='page', options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='page', + self.pb.backup_node('node2', node2, backup_type='page', options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[2] - show_backup2 = self.show_pb(backup_dir2, 'node2')[2] + show_backup1 = self.pb.show('node1')[2] + show_backup2 = self.pb.show('node2')[2] self.assertEqual(show_backup1['id'], show_backup2['id']) # PTRACK backup @@ -3579,27 +2579,24 @@ def test_start_time_few_nodes(self): 'postgres', 'create extension ptrack') - startTime = str(int(time())) - self.backup_node( - backup_dir1, 'node1', node1, backup_type='ptrack', + startTime = max(int(time()), startTime+1) + self.pb.backup_node( + 'node1', node1, backup_type='ptrack', options=['--stream', '--start-time={0}'.format(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type='ptrack', + self.pb.backup_node('node2', node2, backup_type='ptrack', options=['--stream', '--start-time={0}'.format(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[3] - show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + show_backup1 = self.pb.show('node1')[3] + show_backup2 = self.pb.show('node2')[3] self.assertEqual(show_backup1['id'], show_backup2['id']) def test_regress_issue_585(self): 
"""https://github.com/postgrespro/pg_probackup/issues/585""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple( + base_dir='node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # create couple of files that looks like db files @@ -3608,12 +2605,10 @@ def test_regress_issue_585(self): with open(os.path.join(node.data_dir, 'pg_multixact/members/1000'),'wb') as f: pass - self.backup_node( - backup_dir, 'node', node, backup_type='full', + self.pb.backup_node('node', node, backup_type='full', options=['--stream']) - output = self.backup_node( - backup_dir, 'node', node, backup_type='delta', + output = self.pb.backup_node('node', node, backup_type='delta', options=['--stream'], return_id=False, ) @@ -3621,30 +2616,27 @@ def test_regress_issue_585(self): node.cleanup() - output = self.restore_node(backup_dir, 'node', node) + output = self.pb.restore_node('node', node) self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') def test_2_delta_backups(self): """https://github.com/postgrespro/pg_probackup/issues/596""" - node = self.make_simple_node('node', - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + backup_dir = self.backup_dir - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - # self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + # self.pb.set_archiving('node', node) node.slow_start() # FULL - full_backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"]) + full_backup_id = self.pb.backup_node('node', node, options=["--stream"]) # delta backup mode - delta_backup_id1 = self.backup_node( - backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + delta_backup_id1 = self.pb.backup_node('node', node, backup_type="delta", options=["--stream"]) - delta_backup_id2 = self.backup_node( - backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + delta_backup_id2 = self.pb.backup_node('node', node, backup_type="delta", options=["--stream"]) # postgresql.conf and pg_hba.conf shouldn't be copied conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id1, 'database', 'postgresql.conf') @@ -3656,3 +2648,308 @@ def test_2_delta_backups(self): self.assertFalse( os.path.exists(conf_file), "File should not exist: {0}".format(conf_file)) + + + ######################################### + # --dry-run + ######################################### + + def test_dry_run_backup(self): + """ + Test dry-run option for full backup + """ + node = self.pg_node.make_simple('node', + ptrack_enable=self.ptrack, + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + external_dir = self.get_tblspace_path(node, 'somedirectory') + os.mkdir(external_dir) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. 
+ node.pgbench_init(scale=50, no_vacuum=True) + + backup_dir = self.backup_dir + + content_before = self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + + # FULL archive + backup_id = self.pb.backup_node('node', node, options=['--dry-run', '--note=test_note', + '--external-dirs={0}'.format(external_dir)]) + + show_backups = self.pb.show('node') + self.assertEqual(len(show_backups), 0) + + self.compare_pgdata( + content_before, + self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + ) + + # FULL stream + backup_id = self.pb.backup_node('node', node, options=['--stream', '--dry-run', '--note=test_note', + '--external-dirs={0}'.format(external_dir)]) + + show_backups = self.pb.show('node') + self.assertEqual(len(show_backups), 0) + + self.compare_pgdata( + content_before, + self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + ) + + # do FULL + backup_id = self.pb.backup_node('node', node, + options=['--stream', '--external-dirs={0}'.format(external_dir), + '--note=test_note']) + # Add some data changes to better testing + pgbench = node.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + content_before = self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + + # DELTA + delta_backup_id = self.pb.backup_node('node', node, backup_type="delta", + options=['--stream', '--external-dirs={0}'.format(external_dir), + '--note=test_note', '--dry-run']) + # DELTA + delta_backup_id = self.pb.backup_node('node', node, backup_type="delta", + options=['--external-dirs={0}'.format(external_dir), + '--note=test_note', '--dry-run']) + show_backups = self.pb.show('node') + self.assertEqual(len(show_backups), 1) + + self.compare_pgdata( + content_before, + self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + ) + + # do DELTA + delta_backup_id = self.pb.backup_node('node', node, backup_type="delta", + options=['--stream', '--external-dirs={0}'.format(external_dir), + '--note=test_note']) + # Add some data changes + pgbench = node.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + instance_before = self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + + # PAGE + page_backup_id = self.pb.backup_node('node', node, backup_type="page", + options=['--external-dirs={0}'.format(external_dir), + '--note=test_note', '--dry-run']) + show_backups = self.pb.show('node') + self.assertEqual(len(show_backups), 2) + + self.compare_pgdata( + instance_before, + self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + ) + + # do PAGE + page_backup_id = self.pb.backup_node('node', node, backup_type="page", + options=['--external-dirs={0}'.format(external_dir), + '--note=test_note']) + instance_before = self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + + # Add some data changes + pgbench = node.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + if self.ptrack: + node.safe_psql( + "postgres", + "create extension ptrack") + + if self.ptrack: + backup_id = self.pb.backup_node('node', node, backup_type='ptrack', + options=['--stream', '--external-dirs={0}'.format(external_dir), + '--note=test_note', '--dry-run']) + if self.ptrack: + backup_id = self.pb.backup_node('node', node, backup_type='ptrack', + options=['--external-dirs={0}'.format(external_dir), + '--note=test_note', '--dry-run']) + + show_backups = self.pb.show('node') + self.assertEqual(len(show_backups), 3) + + self.compare_pgdata( + instance_before, + self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + ) + + # do 
PTRACK + if self.ptrack: + backup_id = self.pb.backup_node('node', node, backup_type='ptrack', + options=['--stream', '--external-dirs={0}'.format(external_dir), + '--note=test_note']) + + out = self.pb.validate('node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + # Cleanup + node.stop() + + @unittest.skipIf(not fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_basic_dry_run_check_backup_with_access(self): + """ + Access check suite if disk mounted as read_only + """ + node = self.pg_node.make_simple('node', + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. + node.pgbench_init(scale=20, no_vacuum=True) + + # FULL backup + self.pb.backup_node('node', node, options=['--dry-run', '--stream', '--log-level-file=verbose']) + + check_permissions_dir = ['backups', 'wal'] + for dir in check_permissions_dir: + # Access check suit if disk mounted as read_only + dir_path = os.path.join(backup_dir, dir) + dir_mode = os.stat(dir_path).st_mode + os.chmod(dir_path, 0o400) + print(backup_dir) + + try: + error_message = self.pb.backup_node('node', node, backup_type='delta', + options=['--stream', '--dry-run'], + expect_error="because of changed permissions") + + self.assertMessage(error_message, contains='Permission denied') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + os.chmod(dir_path, 0o500) + print(backup_dir) + + try: + error_message = self.pb.backup_node('node', node, backup_type='delta', + options=['--stream', '--dry-run'], + expect_error="because of changed permissions") + + + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + + node.stop() + node.cleanup() + + def string_in_file(self, file_path, str): + with open(file_path, 'r') as file: + # read all content of a file + content = file.read() + # check if string present in a file + if str in content: + return True + else: + return False + + def test_dry_run_restore_point_absence(self): + node = self.pg_node.make_simple('node', + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. + node.pgbench_init(scale=100, no_vacuum=True) + + data_dir = node.data_dir + + backup_id = self.pb.backup_node('node', node, options=['--dry-run']) + + node.stop() + + restore_point = self.string_in_file(os.path.join(node.logs_dir, "postgresql.log"), "restore point") + self.assertFalse(restore_point, "String should not exist: {0}".format("restore point")) + + @needs_gdb + def test_dry_run_backup_kill_process(self): + node = self.pg_node.make_simple('node', + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. 
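Editorial note, a sketch only: the permission checks above save a directory's mode, chmod it to a restricted value, and restore it in a finally block. The same idea can be written as a small context manager so the restore cannot be forgotten; this helper is illustrative and not part of the test framework.

import os
from contextlib import contextmanager

@contextmanager
def temporary_mode(path: str, mode: int):
    """Temporarily chmod `path` to `mode`, restoring the original mode on exit."""
    saved = os.stat(path).st_mode
    os.chmod(path, mode)
    try:
        yield path
    finally:
        os.chmod(path, saved)

# usage sketch, mirroring the read-only 'backups' directory case above:
#   with temporary_mode(os.path.join(backup_dir, 'backups'), 0o400):
#       self.pb.backup_node('node', node, backup_type='delta',
#                           options=['--stream', '--dry-run'],
#                           expect_error="because of changed permissions")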
+ node.pgbench_init(scale=20, no_vacuum=True) + + backup_dir = self.backup_dir + + content_before = self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + # FULL backup + gdb = self.pb.backup_node('node', node, options=['--dry-run', '--stream', '--log-level-file=verbose'], + gdb=True) + + gdb.set_breakpoint('backup_files') + gdb.run_until_break() + gdb.signal('SIGTERM') + gdb.continue_execution_until_error() + + self.compare_pgdata( + content_before, + self.pgdata_content(os.path.join(backup_dir, 'backups', 'node')) + ) + + gdb.kill() + node.stop() + + def test_limit_rate_full_backup(self): + """ + Test full backup with slow down to 8MBps speed + """ + set_rate_limit = 8 + node = self.pg_node.make_simple('node', + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=5, no_vacuum=True) + + # FULL backup with rate limit + backup_id = self.pb.backup_node("node", node, options=['--write-rate-limit='+str(set_rate_limit)]) + + # Validate backup + out = self.pb.validate('node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + + # Calculate time from start to end of backup + show_backup = self.pb.show("node") + backup_time = (datetime.strptime(show_backup[0]["end-time"]+"00", "%Y-%m-%d %H:%M:%S%z") - + datetime.strptime(show_backup[0]["start-time"]+"00", "%Y-%m-%d %H:%M:%S%z") + ).seconds + + # Calculate rate limit we've got in MBps and round it down + get_rate_limit = int(show_backup[0]["data-bytes"] / (1024 * 1024 * backup_time)) + + # Check that we are NOT faseter than expexted + self.assertLessEqual(get_rate_limit, set_rate_limit) diff --git a/tests/catchup_test.py b/tests/catchup_test.py index cf8388dd2..117ac0407 100644 --- a/tests/catchup_test.py +++ b/tests/catchup_test.py @@ -1,10 +1,15 @@ import os +import subprocess from pathlib import Path -import signal -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest +from parameterized import parameterized -class CatchupTest(ProbackupTest, unittest.TestCase): +module_name = 'catchup' + + +class CatchupTest(ProbackupTest): + def setUp(self): + self.fname = self.id().split('.')[3] ######################################### # Basic tests @@ -14,8 +19,7 @@ def test_basic_full_catchup(self): Test 'multithreaded basebackup' mode (aka FULL catchup) """ # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True ) src_pg.slow_start() @@ -25,8 +29,8 @@ def test_basic_full_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do full catchup - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -43,7 +47,7 @@ def test_basic_full_catchup(self): src_pg.stop() dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() # 2nd check: run verification query @@ -54,13 +58,165 @@ def test_basic_full_catchup(self): dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') + + 
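Editorial note, a standalone illustration only: the cascade test that follows is parameterized over the incremental modes via parameterized.expand, which generates one test method per listed value. The class, method and assertion below are made up for the example; only the decorator usage mirrors the suite.

import unittest
from parameterized import parameterized

class ModesExample(unittest.TestCase):
    @parameterized.expand(("DELTA", "PTRACK"))
    def test_mode(self, backup_mode):
        # expands into one generated test per value,
        # typically named like test_mode_0_DELTA and test_mode_1_PTRACK
        self.assertIn(backup_mode, ("DELTA", "PTRACK"))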
@parameterized.expand(("DELTA", "PTRACK")) + def test_cascade_catchup(self, test_input): + """ + Test catchup of catchup'ed node + """ + # preparation + + if test_input == "PTRACK" and not self.ptrack: + self.skipTest("Ptrack is disabled, test_cascade_catchup") + elif test_input == "PTRACK" and self.ptrack: + db1 = self.pg_node.make_simple('db1', set_replication = True, ptrack_enable=True) + else: + db1 = self.pg_node.make_simple('db1', set_replication = True) + + db1.slow_start() + + if test_input == "PTRACK": + db1.safe_psql("postgres", "CREATE EXTENSION ptrack") + + db1.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + db1_query_result = db1.table_checksum("ultimate_question") + + # full catchup db1 -> db2 + db2 = self.pg_node.make_empty('db2') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = db1.data_dir, + destination_node = db2, + options = ['-d', 'postgres', '-p', str(db1.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(db1.data_dir), + self.pgdata_content(db2.data_dir) + ) + + # run&recover catchup'ed instance + self.set_replica(db1, db2) + db2_options = {} + db2_options['port'] = str(db2.port) + db2.set_auto_conf(db2_options) + db2.slow_start(replica = True) + + # 2nd check: run verification query + db2_query_result = db2.table_checksum("ultimate_question") + self.assertEqual(db1_query_result, db2_query_result, 'Different answer from copy 2') + + # full catchup db2 -> db3 + db3 = self.pg_node.make_empty('db3') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = db2.data_dir, + destination_node = db3, + options = ['-d', 'postgres', '-p', str(db2.port), '--stream'] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(db2.data_dir), + self.pgdata_content(db3.data_dir) + ) + + # run&recover catchup'ed instance + self.set_replica(db2, db3) + db3_options = {} + db3_options['port'] = str(db3.port) + db3.set_auto_conf(db3_options) + db3.slow_start(replica = True) + + db3_query_result = db3.table_checksum("ultimate_question") + self.assertEqual(db2_query_result, db3_query_result, 'Different answer from copy 3') + + db2.stop() + db3.stop() + + # data modifications before incremental catchups + db1.safe_psql( + "postgres", + "UPDATE ultimate_question SET answer = -1") + db1.safe_psql("postgres", "CHECKPOINT") + + # do first incremental catchup + self.pb.catchup_node( + backup_mode = test_input, + source_pgdata = db1.data_dir, + destination_node = db2, + options = ['-d', 'postgres', '-p', str(db1.port), '--stream'] + ) + + self.compare_pgdata( + self.pgdata_content(db1.data_dir), + self.pgdata_content(db2.data_dir) + ) + + self.set_replica(db1, db2) + db2_options = {} + db2_options['port'] = str(db2.port) + db2.set_auto_conf(db2_options) + db2.slow_start(replica = True) + + # do second incremental catchup + self.pb.catchup_node( + backup_mode = test_input, + source_pgdata = db2.data_dir, + destination_node = db3, + options = ['-d', 'postgres', '-p', str(db2.port), '--stream'] + ) + + self.compare_pgdata( + self.pgdata_content(db2.data_dir), + self.pgdata_content(db3.data_dir) + ) + + self.set_replica(db2, db3) + db3_options = {} + db3_options['port'] = str(db3.port) + db3.set_auto_conf(db3_options) + self.pb.set_archiving('db3', db3, replica=True) + db3.slow_start(replica = True) + + # data modification for checking continuous archiving + db1.safe_psql( + "postgres", + "DROP TABLE ultimate_question") + db1.safe_psql("postgres", 
"CHECKPOINT") + + self.wait_until_replica_catch_with_master(db1, db2) + self.wait_until_replica_catch_with_master(db1, db3) + + db1_query_result = db1.table_checksum("pg_class") + db2_query_result = db2.table_checksum("pg_class") + db3_query_result = db3.table_checksum("pg_class") + + self.assertEqual(db1_query_result, db2_query_result, 'Different answer from copy 2') + self.assertEqual(db2_query_result, db3_query_result, 'Different answer from copy 3') + + db1_query_result = db1.table_checksum("pg_depend") + db2_query_result = db2.table_checksum("pg_depend") + db3_query_result = db3.table_checksum("pg_depend") + + self.assertEqual(db1_query_result, db2_query_result, 'Different answer from copy 2') + self.assertEqual(db2_query_result, db3_query_result, 'Different answer from copy 3') + + # cleanup + db3.stop() + db2.stop() + db1.stop() + + def test_full_catchup_with_tablespace(self): """ Test tablespace transfers """ # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True ) src_pg.slow_start() @@ -72,9 +228,9 @@ def test_full_catchup_with_tablespace(self): src_query_result = src_pg.table_checksum("ultimate_question") # do full catchup with tablespace mapping - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + dst_pg = self.pg_node.make_empty('dst') tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') - self.catchup_node( + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -101,7 +257,7 @@ def test_full_catchup_with_tablespace(self): # run&recover catchup'ed instance dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() # 2nd check: run verification query @@ -116,8 +272,7 @@ def test_basic_delta_catchup(self): Test delta catchup """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -127,8 +282,8 @@ def test_basic_delta_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -137,7 +292,7 @@ def test_basic_delta_catchup(self): self.set_replica(src_pg, dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -149,7 +304,7 @@ def test_basic_delta_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -167,7 +322,7 @@ def test_basic_delta_catchup(self): self.set_replica(master = src_pg, replica = dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) # 2nd check: run verification query @@ -186,12 +341,7 @@ def test_basic_ptrack_catchup(self): 
self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), - set_replication = True, - ptrack_enable = True, - initdb_params = ['--data-checksums'] - ) + src_pg = self.pg_node.make_simple('src', set_replication=True, ptrack_enable=True) src_pg.slow_start() src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") src_pg.safe_psql( @@ -199,8 +349,8 @@ def test_basic_ptrack_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -209,7 +359,7 @@ def test_basic_ptrack_catchup(self): self.set_replica(src_pg, dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -221,7 +371,7 @@ def test_basic_ptrack_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do ptrack catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -239,7 +389,7 @@ def test_basic_ptrack_catchup(self): self.set_replica(master = src_pg, replica = dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) # 2nd check: run verification query @@ -255,16 +405,15 @@ def test_tli_delta_catchup(self): Test that we correctly follow timeline change with delta catchup """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -272,7 +421,7 @@ def test_tli_delta_catchup(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -285,7 +434,7 @@ def test_tli_delta_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do catchup (src_tli = 2, dst_tli = 1) - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -301,7 +450,7 @@ def test_tli_delta_catchup(self): # run&recover catchup'ed instance dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) self.set_replica(master = src_pg, replica = dst_pg) dst_pg.slow_start(replica = True) @@ -312,7 +461,7 @@ def test_tli_delta_catchup(self): dst_pg.stop() # do catchup (src_tli = 2, dst_tli = 2) - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -330,18 +479,13 @@ def test_tli_ptrack_catchup(self): self.skipTest('Skipped because ptrack support is disabled') # 
preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), - set_replication = True, - ptrack_enable = True, - initdb_params = ['--data-checksums'] - ) + src_pg = self.pg_node.make_simple('src', set_replication=True, ptrack_enable=True) src_pg.slow_start() src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -349,7 +493,7 @@ def test_tli_ptrack_catchup(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -367,7 +511,7 @@ def test_tli_ptrack_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do catchup (src_tli = 2, dst_tli = 1) - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -383,7 +527,7 @@ def test_tli_ptrack_catchup(self): # run&recover catchup'ed instance dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) self.set_replica(master = src_pg, replica = dst_pg) dst_pg.slow_start(replica = True) @@ -394,7 +538,7 @@ def test_tli_ptrack_catchup(self): dst_pg.stop() # do catchup (src_tli = 2, dst_tli = 2) - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -412,8 +556,7 @@ def test_table_drop_with_delta(self): Test that dropped table in source will be dropped in delta catchup'ed instance too """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -423,8 +566,8 @@ def test_table_drop_with_delta(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -432,7 +575,7 @@ def test_table_drop_with_delta(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -443,7 +586,7 @@ def test_table_drop_with_delta(self): src_pg.safe_psql("postgres", "CHECKPOINT") # do delta catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -467,12 +610,9 @@ def test_table_drop_with_ptrack(self): self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, - ptrack_enable = True, - initdb_params = ['--data-checksums'] - ) + ptrack_enable = True) src_pg.slow_start() src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") src_pg.safe_psql( @@ -480,8 +620,8 @@ def test_table_drop_with_ptrack(self): "CREATE 
TABLE ultimate_question AS SELECT 42 AS answer") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -489,7 +629,7 @@ def test_table_drop_with_ptrack(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -500,7 +640,7 @@ def test_table_drop_with_ptrack(self): src_pg.safe_psql("postgres", "CHECKPOINT") # do ptrack catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -521,8 +661,7 @@ def test_tablefile_truncation_with_delta(self): Test that truncated table in source will be truncated in delta catchup'ed instance too """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -537,8 +676,8 @@ def test_tablefile_truncation_with_delta(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -547,7 +686,7 @@ def test_tablefile_truncation_with_delta(self): dest_options = {} dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -556,7 +695,7 @@ def test_tablefile_truncation_with_delta(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # do delta catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -580,12 +719,9 @@ def test_tablefile_truncation_with_ptrack(self): self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, - ptrack_enable = True, - initdb_params = ['--data-checksums'] - ) + ptrack_enable = True) src_pg.slow_start() src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") src_pg.safe_psql( @@ -598,8 +734,8 @@ def test_tablefile_truncation_with_ptrack(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -608,7 +744,7 @@ def test_tablefile_truncation_with_ptrack(self): dest_options = {} dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -617,7 +753,7 @@ def test_tablefile_truncation_with_ptrack(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # do ptrack catchup - self.catchup_node( + self.pb.catchup_node( backup_mode 
= 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -643,7 +779,7 @@ def test_local_tablespace_without_mapping(self): if self.remote: self.skipTest('Skipped because this test tests local catchup error handling') - src_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'src')) + src_pg = self.pg_node.make_simple('src') src_pg.slow_start() tblspace_path = self.get_tblspace_path(src_pg, 'tblspace') @@ -655,9 +791,8 @@ def test_local_tablespace_without_mapping(self): "postgres", "CREATE TABLE ultimate_question TABLESPACE tblspace AS SELECT 42 AS answer") - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - try: - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -665,15 +800,11 @@ def test_local_tablespace_without_mapping(self): '-d', 'postgres', '-p', str(src_pg.port), '--stream', - ] + ], + expect_error="because '-T' parameter is not specified" ) - self.assertEqual(1, 0, "Expecting Error because '-T' parameter is not specified.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Local catchup executed, but source database contains tablespace', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Local catchup executed, but source ' + 'database contains tablespace') # Cleanup src_pg.stop() @@ -683,16 +814,15 @@ def test_running_dest_postmaster(self): Test that we detect running postmaster in destination """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -700,26 +830,20 @@ def test_running_dest_postmaster(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() # leave running destination postmaster # so don't call dst_pg.stop() # try delta catchup - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'], + expect_error="because postmaster in destination is running" ) - self.assertEqual(1, 0, "Expecting Error because postmaster in destination is running.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Postmaster with pid ', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Postmaster with pid ') # Cleanup src_pg.stop() @@ -730,14 +854,13 @@ def test_same_db_id(self): """ # preparation: # source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True ) src_pg.slow_start() # 
destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -745,45 +868,33 @@ def test_same_db_id(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() # fake destination - fake_dst_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'fake_dst')) + fake_dst_pg = self.pg_node.make_simple('fake_dst') # fake source - fake_src_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'fake_src')) + fake_src_pg = self.pg_node.make_simple('fake_src') # try delta catchup (src (with correct src conn), fake_dst) - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = fake_dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'], + expect_error="because database identifiers mismatch" ) - self.assertEqual(1, 0, "Expecting Error because database identifiers mismatch.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Database identifiers mismatch: ', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Database identifiers mismatch: ') # try delta catchup (fake_src (with wrong src conn), dst) - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = fake_src_pg.data_dir, destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'], + expect_error="because database identifiers mismatch" ) - self.assertEqual(1, 0, "Expecting Error because database identifiers mismatch.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Database identifiers mismatch: ', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Database identifiers mismatch: ') # Cleanup src_pg.stop() @@ -793,16 +904,15 @@ def test_tli_destination_mismatch(self): Test that we detect TLI mismatch in destination """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -810,7 +920,7 @@ def test_tli_destination_mismatch(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) self.set_replica(src_pg, dst_pg) dst_pg.slow_start(replica = True) dst_pg.promote() @@ -818,28 +928,16 @@ def test_tli_destination_mismatch(self): # preparation 3: "useful" changes src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 
42 AS answer") - src_query_result = src_pg.table_checksum("ultimate_question") # try catchup - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'], + expect_error="because of stale timeline", ) - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - dst_query_result = dst_pg.table_checksum("ultimate_question") - dst_pg.stop() - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') - except ProbackupException as e: - self.assertIn( - 'ERROR: Source is behind destination in timeline history', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Source is behind destination in timeline history') # Cleanup src_pg.stop() @@ -849,16 +947,15 @@ def test_tli_source_mismatch(self): Test that we detect TLI mismatch in source history """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: fake source (promouted copy) - fake_src_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'fake_src')) - self.catchup_node( + fake_src_pg = self.pg_node.make_empty('fake_src') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = fake_src_pg, @@ -866,7 +963,7 @@ def test_tli_source_mismatch(self): ) fake_src_options = {} fake_src_options['port'] = str(fake_src_pg.port) - self.set_auto_conf(fake_src_pg, fake_src_options) + fake_src_pg.set_auto_conf(fake_src_options) self.set_replica(src_pg, fake_src_pg) fake_src_pg.slow_start(replica = True) fake_src_pg.promote() @@ -881,8 +978,8 @@ def test_tli_source_mismatch(self): fake_src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 'trash' AS garbage") # preparation 3: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -890,34 +987,22 @@ def test_tli_source_mismatch(self): ) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() # preparation 4: "useful" changes src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.table_checksum("ultimate_question") # try catchup - try: - self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = fake_src_pg.data_dir, - destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(fake_src_pg.port), '--stream'] - ) - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - dst_query_result = dst_pg.table_checksum("ultimate_question") - dst_pg.stop() - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') - except ProbackupException as e: - self.assertIn( - 'ERROR: Destination is not in source timeline history', - e.message, - '\n Unexpected Error Message: {0}\n CMD: 
{1}'.format(repr(e.message), self.cmd)) + self.pb.catchup_node( + backup_mode = 'DELTA', + source_pgdata = fake_src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(fake_src_pg.port), '--stream'], + expect_error="because of future timeline", + ) + self.assertMessage(contains='ERROR: Destination is not in source timeline history') # Cleanup src_pg.stop() @@ -931,8 +1016,7 @@ def test_unclean_delta_catchup(self): Test that we correctly recover uncleanly shutdowned destination """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -942,8 +1026,8 @@ def test_unclean_delta_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -951,25 +1035,19 @@ def test_unclean_delta_catchup(self): ) # try #1 - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'], + expect_error="because destination pg is not cleanly shutdowned" ) - self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Destination directory contains "backup_label" file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Destination directory contains "backup_label" file') # try #2 dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") dst_pg.kill() @@ -982,7 +1060,7 @@ def test_unclean_delta_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1000,7 +1078,7 @@ def test_unclean_delta_catchup(self): self.set_replica(master = src_pg, replica = dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) # 2nd check: run verification query @@ -1018,8 +1096,7 @@ def test_unclean_ptrack_catchup(self): self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, ptrack_enable = True, pg_options = { 'wal_log_hints': 'on' } @@ -1031,8 +1108,8 @@ def test_unclean_ptrack_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = 
src_pg.data_dir, destination_node = dst_pg, @@ -1040,25 +1117,19 @@ def test_unclean_ptrack_catchup(self): ) # try #1 - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'], + expect_error="because destination pg is not cleanly shutdowned" ) - self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Destination directory contains "backup_label" file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: Destination directory contains "backup_label" file') # try #2 dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") dst_pg.kill() @@ -1071,7 +1142,7 @@ def test_unclean_ptrack_catchup(self): src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1089,7 +1160,7 @@ def test_unclean_ptrack_catchup(self): self.set_replica(master = src_pg, replica = dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) # 2nd check: run verification query @@ -1117,48 +1188,41 @@ def test_catchup_with_replication_slot(self): """ """ # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True ) src_pg.slow_start() # 1a. --slot option - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_1a')) - try: - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst_1a') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, options = [ '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--slot=nonexistentslot_1a' - ] + '--slot=nonexistentslot_1a', '--temp-slot=false' + ], + expect_error="because replication slot does not exist" ) - self.assertEqual(1, 0, "Expecting Error because replication slot does not exist.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: replication slot "nonexistentslot_1a" does not exist', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: replication slot "nonexistentslot_1a" does not exist') # 1b. --slot option - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_1b')) + dst_pg = self.pg_node.make_empty('dst_1b') src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_1b')") - self.catchup_node( + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, options = [ '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--slot=existentslot_1b' + '--slot=existentslot_1b', '--temp-slot=false' ] ) # 2a. 
--slot --perm-slot - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_2a')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst_2a') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1170,10 +1234,9 @@ def test_catchup_with_replication_slot(self): ) # 2b. and 4. --slot --perm-slot - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_2b')) + dst_pg = self.pg_node.make_empty('dst_2b') src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_2b')") - try: - self.catchup_node( + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1181,19 +1244,14 @@ def test_catchup_with_replication_slot(self): '-d', 'postgres', '-p', str(src_pg.port), '--stream', '--slot=existentslot_2b', '--perm-slot' - ] + ], + expect_error="because replication slot already exist" ) - self.assertEqual(1, 0, "Expecting Error because replication slot already exist.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: replication slot "existentslot_2b" already exists', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.assertMessage(contains='ERROR: replication slot "existentslot_2b" already exists') # 3. --perm-slot --slot - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_3')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst_3') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1210,29 +1268,47 @@ def test_catchup_with_replication_slot(self): ).decode('utf-8').rstrip() self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch') - # 5. --perm-slot --temp-slot (PG>=10) - if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_5')) - try: - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--perm-slot', - '--temp-slot' - ] - ) - self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + # 5. 
--perm-slot --temp-slot + dst_pg = self.pg_node.make_empty('dst_5a') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot' + ], + expect_error="because conflicting options --perm-slot and --temp-slot used together" + ) + self.assertMessage(contains='ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option') + + dst_pg = self.pg_node.make_empty('dst_5b') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot=true' + ], + expect_error="because conflicting options --perm-slot and --temp-slot used together" + ) + self.assertMessage(contains='ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option') - #self.assertEqual(1, 0, 'Stop test') + dst_pg = self.pg_node.make_empty('dst_5c') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot=false', + '--slot=dst_5c' + ], + ) ######################################### # --exclude-path @@ -1242,8 +1318,7 @@ def test_catchup_with_exclude_path(self): various syntetic tests for --exclude-path option """ # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True ) src_pg.slow_start() @@ -1260,8 +1335,8 @@ def test_catchup_with_exclude_path(self): f.flush() f.close - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1279,7 +1354,7 @@ def test_catchup_with_exclude_path(self): self.set_replica(src_pg, dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -1291,7 +1366,7 @@ def test_catchup_with_exclude_path(self): f.flush() f.close - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1317,8 +1392,7 @@ def test_config_exclusion(self): Test that catchup can preserve dest replication config """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -1328,8 +1402,8 @@ def test_config_exclusion(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1338,7 +1412,7 @@ def test_config_exclusion(self): self.set_replica(src_pg, dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg._assign_master(src_pg) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -1349,7 
+1423,7 @@ def test_config_exclusion(self): pgbench.wait() # test 1: do delta catchup with relative exclusion paths - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1379,7 +1453,7 @@ def test_config_exclusion(self): pgbench.wait() # test 2: do delta catchup with absolute source exclusion paths - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1408,7 +1482,7 @@ def test_config_exclusion(self): pgbench.wait() # test 3: do delta catchup with absolute destination exclusion paths - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1444,14 +1518,13 @@ def test_dry_run_catchup_full(self): Test dry-run option for full catchup """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True ) src_pg.slow_start() # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + dst_pg = self.pg_node.make_empty('dst') src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) @@ -1461,7 +1534,7 @@ def test_dry_run_catchup_full(self): content_before = self.pgdata_content(dst_pg.data_dir) # do full catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1485,12 +1558,9 @@ def test_dry_run_catchup_ptrack(self): self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, - ptrack_enable = True, - initdb_params = ['--data-checksums'] - ) + ptrack_enable = True) src_pg.slow_start() src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") @@ -1499,8 +1569,8 @@ def test_dry_run_catchup_ptrack(self): pgbench.wait() # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1509,7 +1579,7 @@ def test_dry_run_catchup_ptrack(self): self.set_replica(src_pg, dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -1517,7 +1587,7 @@ def test_dry_run_catchup_ptrack(self): content_before = self.pgdata_content(dst_pg.data_dir) # do incremental catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'PTRACK', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1539,10 +1609,8 @@ def test_dry_run_catchup_delta(self): """ # preparation 1: source - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication = True, - initdb_params = ['--data-checksums'], pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() @@ -1552,8 +1620,8 @@ def test_dry_run_catchup_delta(self): pgbench.wait() # preparation 2: make clean shutdowned lagging behind replica - dst_pg = 
self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1562,7 +1630,7 @@ def test_dry_run_catchup_delta(self): self.set_replica(src_pg, dst_pg) dst_options = {} dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -1570,7 +1638,7 @@ def test_dry_run_catchup_delta(self): content_before = self.pgdata_content(dst_pg.data_dir) # do delta catchup - self.catchup_node( + self.pb.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, @@ -1591,14 +1659,14 @@ def test_pgdata_is_ignored(self): or from the env var. This test that PGDATA is actually ignored and --source-pgadta is used instead """ - node = self.make_simple_node('node', + node = self.pg_node.make_simple('node', set_replication = True ) node.slow_start() # do full catchup - dest = self.make_empty_node('dst') - self.catchup_node( + dest = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = node.data_dir, destination_node = dest, @@ -1610,10 +1678,10 @@ def test_pgdata_is_ignored(self): self.pgdata_content(dest.data_dir) ) - os.environ['PGDATA']='xxx' + self.test_env['PGDATA']='xxx' - dest2 = self.make_empty_node('dst') - self.catchup_node( + dest2 = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode = 'FULL', source_pgdata = node.data_dir, destination_node = dest2, @@ -1624,3 +1692,371 @@ def test_pgdata_is_ignored(self): self.pgdata_content(node.data_dir), self.pgdata_content(dest2.data_dir) ) + + def test_catchup_from_standby_single_wal(self): + """ Make a standby node, with a single WAL file in it and try to catchup """ + node = self.pg_node.make_simple('node', + pg_options={'hot_standby': 'on'}) + node.set_auto_conf({}, 'postgresql.conf', ['max_worker_processes']) + standby_signal = os.path.join(node.data_dir, 'standby.signal') + with open(standby_signal, 'w') as fout: + fout.flush() + fout.close() + node.start() + + # No inserts to keep WAL size small + + dest = self.pg_node.make_empty('dst') + + self.pb.catchup_node( + backup_mode='FULL', + source_pgdata=node.data_dir, + destination_node=dest, + options = ['-d', 'postgres', '-p', str(node.port), '--stream'] + ) + + dst_options = {} + dst_options['port'] = str(dest.port) + dest.set_auto_conf(dst_options) + + dest.slow_start() + res = dest.safe_psql("postgres", "select 1").decode('utf-8').strip() + self.assertEqual(res, "1") + + def test_catchup_ptrack_unlogged(self): + """ catchup + ptrack when unlogged tables exist """ + node = self.pg_node.make_simple('node', ptrack_enable = True) + node.slow_start() + node.safe_psql("postgres", "CREATE EXTENSION ptrack") + + dest = self.pg_node.make_empty('dst') + + self.pb.catchup_node( + backup_mode='FULL', + source_pgdata=node.data_dir, + destination_node=dest, + options = ['-d', 'postgres', '-p', str(node.port), '--stream'] + ) + + for i in range(1,7): + node.safe_psql('postgres', 'create unlogged table t' + str(i) + ' (id int, name text);') + + dst_options = {} + dst_options['port'] = str(dest.port) + dest.set_auto_conf(dst_options) + + dest.slow_start() + dest.stop() + + self.pb.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = node.data_dir, + destination_node = dest, + options = ['-d', 'postgres', '-p', str(node.port), '--stream', 
'--dry-run'] + ) + + return + + def test_catchup_instance_from_the_past(self): + src_pg = self.pg_node.make_simple('src', + set_replication=True + ) + src_pg.slow_start() + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( + backup_mode='FULL', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + dst_options = {'port': str(dst_pg.port)} + dst_pg.set_auto_conf(dst_options) + dst_pg.slow_start() + dst_pg.pgbench_init(scale=10) + pgbench = dst_pg.pgbench( + stdout=subprocess.PIPE, + options=["-c", "4", "-T", "20"]) + pgbench.wait() + pgbench.stdout.close() + dst_pg.stop() + self.pb.catchup_node( + backup_mode='DELTA', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + options=[ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream' + ], + expect_error="because instance is from the past" + ) + + self.assertMessage(regex='ERROR: Current START LSN .* is lower than SYNC LSN') + self.assertMessage(contains='it may indicate that we are trying to catchup ' + 'with PostgreSQL instance from the past') + + +######################################### +# --waldir +######################################### + + def test_waldir_option(self): + """ + Test waldir option for full catchup + """ + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + # preparation: source + src_pg = self.pg_node.make_simple('src', + set_replication = True, + ptrack_enable = True + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + + # do full catchup + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_new_wal_dir')), + ] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # 2nd check new waldir exists + self.assertTrue(Path(os.path.join(self.test_path, 'tmp_new_wal_dir')).exists()) + + #3rd check pg_wal is symlink + if src_pg.major_version >= 10: + wal_path = os.path.join(dst_pg.data_dir, "pg_wal") + else: + wal_path = os.path.join(dst_pg.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) + print("FULL DONE ----------------------------------------------------------") + + """ + Test waldir otion for delta catchup to different directory from full catchup's wal directory + """ + # preparation 2: make clean shutdowned lagging behind replica + + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + dst_pg.set_auto_conf(dst_options) + dst_pg.slow_start(replica=True) + dst_pg.stop() + + # do delta catchup + self.pb.catchup_node( + backup_mode='DELTA', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_another_wal_dir')), + ], + expect_error="because we perform DELTA catchup's WAL in a different dir from FULL catchup's WAL dir", + ) + self.assertMessage(contains='ERROR: WAL directory does not egual to symlinked pg_wal path') + + print("ANOTHER DIR DONE ------------------------------------------------") + + """ + Test waldir otion to delta catchup + """ + + self.set_replica(src_pg, dst_pg) + dst_pg._assign_master(src_pg) + dst_pg.slow_start(replica = 
True) + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + # do delta catchup + self.pb.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_new_wal_dir')), + ], + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # 2nd check new waldir exists + self.assertTrue(Path(os.path.join(self.test_path, 'tmp_new_wal_dir')).exists()) + + #3rd check pg_wal is symlink + if src_pg.major_version >= 10: + wal_path = os.path.join(dst_pg.data_dir, "pg_wal") + else: + wal_path = os.path.join(dst_pg.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) + + print ("DELTA DONE---------------------------------------------------------") + + """ + Test waldir option for catchup in incremental ptrack mode + """ + self.set_replica(src_pg, dst_pg) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # preparation 3: make changes on master (source) + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum']) + pgbench.wait() + + # do incremental catchup + self.pb.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_new_wal_dir')), + ] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # 2nd check new waldir exists + self.assertTrue(Path(os.path.join(self.test_path, 'tmp_new_wal_dir')).exists()) + + #3rd check pg_wal is symlink + if src_pg.major_version >= 10: + wal_path = os.path.join(dst_pg.data_dir, "pg_wal") + else: + wal_path = os.path.join(dst_pg.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) + + print ("PTRACK DONE -----------------------------------------------------------") + + """ + Test waldir option for full catchup to not empty WAL directory + """ + + dst_pg = self.pg_node.make_empty('dst2') + self.pb.catchup_node( + backup_mode='FULL', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + options=[ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_new_wal_dir')), + ], + + expect_error="because full catchup's WAL must be perform into empty directory", + ) + self.assertMessage(contains='ERROR: Can\'t perform FULL catchup with non-empty pg_wal directory') + + print ("ANOTHER FULL DONE -----------------------------------------------------") + # Cleanup + src_pg.stop() + + + def test_waldir_delta_catchup_without_full(self): + """ + Test waldir otion with delta catchup without using it doing full + """ + # preparation 1: source + src_pg = self.pg_node.make_simple('src', + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', + ], + ) + 
self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + dst_pg.set_auto_conf(dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # do delta catchup + self.pb.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_new_wal_dir')), + ], + expect_error="because we didn't perform FULL catchup's WAL before DELTA catchup", + ) + self.assertMessage(contains='ERROR: Unable to read pg_wal symbolic link') + + # Cleanup + src_pg.stop() + + + def test_waldir_dry_run_catchup_full(self): + """ + Test waldir with dry-run option for full catchup + """ + # preparation 1: source + src_pg = self.pg_node.make_simple('src', + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.pg_node.make_empty('dst') + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do full catchup + self.pb.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run', + '--waldir={0}'.format(os.path.join(self.test_path, 'tmp_new_wal_dir')), + ] + ) + + # compare data dirs before and after catchup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + self.assertFalse(Path(os.path.join(self.test_path, 'tmp_new_wal_dir')).exists()) + # Cleanup + src_pg.stop() + diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py deleted file mode 100644 index fb4a6c6b8..000000000 --- a/tests/cfs_backup_test.py +++ /dev/null @@ -1,1216 +0,0 @@ -import os -import unittest -import random -import shutil - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' - - -class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): - # --- Begin --- # - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def setUp(self): - self.backup_dir = os.path.join( - self.tmp_path, self.module_name, self.fname, 'backup') - self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'cfs_encryption': 'off', - 'max_wal_senders': '2', - 'shared_buffers': '200MB' - } - ) - - self.init_pb(self.backup_dir) - self.add_instance(self.backup_dir, 'node', self.node) - self.set_archiving(self.backup_dir, 'node', self.node) - - self.node.slow_start() - - self.node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) - - tblspace = self.node.safe_psql( - "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format( - tblspace_name)) - - self.assertIn( - tblspace_name, str(tblspace), - "ERROR: The tablespace not created " - "or it create without compressions") - - self.assertIn( - "compression=true", str(tblspace), - "ERROR: The tablespace not created " - "or it create without compressions") - - self.assertTrue( - find_by_name( - 
[self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - - # --- Section: Full --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace(self): - """Case: Check fullbackup empty compressed tablespace""" - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_stream(self): - """Case: Check fullbackup empty compressed tablespace with options stream""" - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - # PGPRO-1018 invalid file size - def test_fullbackup_after_create_table(self): - """Case: Make full backup after created table in the tablespace""" - if not self.enterprise: - return - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "\n ERROR: {0}\n CMD: {1}".format( - repr(e.message), - repr(self.cmd) - ) - ) - return False - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in {0}".format( - os.path.join(self.backup_dir, 'node', backup_id)) - ) - - # check cfm size - cfms = find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - # PGPRO-1018 invalid file size - def test_fullbackup_after_create_table_stream(self): - """ - Case: Make full backup after created table in the tablespace with option --stream - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - # --- Section: Incremental from empty tablespace --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_ptrack_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='ptrack') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_ptrack_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='ptrack', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['_ptrack']), - "ERROR: _ptrack files was found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_page_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make page backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_page_doesnt_store_unchanged_cfm(self): - """ - Case: Test page backup doesn't store cfm file if table were not modified - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql("postgres", "checkpoint") - - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id_full)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files is found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_page_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace. - Make page backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='page', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['_ptrack']), - "ERROR: _ptrack files was found in backup dir" - ) - - # --- Section: Incremental from fill tablespace --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_ptrack_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_ptrack = None - try: - backup_id_ptrack = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='ptrack') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb( - self.backup_dir, 'node', backup_id_ptrack) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_ptrack["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_ptrack["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace(--stream). - Make ptrack backup after create table(--stream). 
- Check: incremental backup size should not be greater than full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,25) i".format('t2', tblspace_name) - ) - - backup_id_ptrack = None - try: - backup_id_ptrack = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='ptrack', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb( - self.backup_dir, 'node', backup_id_ptrack) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_ptrack["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_ptrack["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_page_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup size should not be greater than full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_page = None - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_multiple_segments(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap', tblspace_name) - ) - - full_result = self.node.table_checksum("t_heap") - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap') - ) - - page_result = self.node.table_checksum("t_heap") - - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # CHECK FULL BACKUP - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - self.restore_node( - self.backup_dir, 'node', self.node, backup_id=backup_id_full, - options=[ - "-j", "4", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - - self.node.slow_start() - self.assertEqual( - full_result, - self.node.table_checksum("t_heap"), - 'Lost data after restore') - - # CHECK PAGE BACKUP - self.node.stop() - self.node.cleanup() - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name), - ignore_errors=True) - self.restore_node( - self.backup_dir, 'node', self.node, backup_id=backup_id_page, - options=[ - "-j", "4", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - - self.node.slow_start() - self.assertEqual( - page_result, - self.node.table_checksum("t_heap"), - 'Lost data after restore') - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_multiple_segments_in_multiple_tablespaces(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. 
- Check: incremental backup will not greater as full - """ - tblspace_name_1 = 'tblspace_name_1' - tblspace_name_2 = 'tblspace_name_2' - - self.create_tblspace_in_node(self.node, tblspace_name_1, cfs=True) - self.create_tblspace_in_node(self.node, tblspace_name_2, cfs=True) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap_1', tblspace_name_1)) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap_2', tblspace_name_2)) - - full_result_1 = self.node.table_checksum("t_heap_1") - full_result_2 = self.node.table_checksum("t_heap_2") - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap_1') - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap_2') - ) - - page_result_1 = self.node.table_checksum("t_heap_1") - page_result_2 = self.node.table_checksum("t_heap_2") - - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # CHECK FULL BACKUP - self.node.stop() - - self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_full, - options=[ - "-j", "4", "--incremental-mode=checksum", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - self.node.slow_start() - - self.assertEqual( - full_result_1, - self.node.table_checksum("t_heap_1"), - 'Lost data after restore') - self.assertEqual( - full_result_2, - self.node.table_checksum("t_heap_2"), - 'Lost data after restore') - - # CHECK PAGE BACKUP - self.node.stop() - - self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_page, - options=[ - "-j", "4", "--incremental-mode=checksum", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - self.node.slow_start() - - self.assertEqual( - page_result_1, - self.node.table_checksum("t_heap_1"), - 'Lost data after restore') - self.assertEqual( - page_result_2, - self.node.table_checksum("t_heap_2"), - 'Lost data after restore') - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_page_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace(--stream). - Make ptrack backup after create table(--stream). - Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_page = None - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='page', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # --- Make backup with not valid data(broken .cfm) --- # - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_random_cfm_file_from_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql( - "postgres", - "CHECKPOINT" - ) - - list_cmf = find_by_extensions( - [self.get_tblspace_path(self.node, tblspace_name)], - ['.cfm']) - self.assertTrue( - list_cmf, - "ERROR: .cfm-files not found into tablespace dir" - ) - - os.remove(random.choice(list_cmf)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_file_pg_compression_from_tablespace_dir(self): - os.remove( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression'])[0]) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_random_data_file_from_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql( - "postgres", - "CHECKPOINT" - ) - - list_data_files = find_by_pattern( - [self.get_tblspace_path(self.node, tblspace_name)], - '^.*/\d+$') - self.assertTrue( - list_data_files, - "ERROR: Files of data not found into tablespace dir" - ) - - os.remove(random.choice(list_data_files)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_random_cfm_file_into_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - list_cmf = find_by_extensions( - [self.get_tblspace_path(self.node, tblspace_name)], - ['.cfm']) - self.assertTrue( - list_cmf, - "ERROR: .cfm-files not found into tablespace dir" - ) - - corrupt_file(random.choice(list_cmf)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_random_data_file_into_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - list_data_files = find_by_pattern( - [self.get_tblspace_path(self.node, tblspace_name)], - '^.*/\d+$') - self.assertTrue( - 
list_data_files, - "ERROR: Files of data not found into tablespace dir" - ) - - corrupt_file(random.choice(list_data_files)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_file_pg_compression_into_tablespace_dir(self): - - corrupted_file = find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression'])[0] - - self.assertTrue( - corrupt_file(corrupted_file), - "ERROR: File is not corrupted or it missing" - ) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - -# # --- End ---# - - -#class CfsBackupEncTest(CfsBackupNoEncTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsBackupEncTest, self).setUp() diff --git a/tests/cfs_catchup_test.py b/tests/cfs_catchup_test.py deleted file mode 100644 index f6760b72c..000000000 --- a/tests/cfs_catchup_test.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import unittest -import random -import shutil - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): - - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_full_catchup_with_tablespace(self): - """ - Test tablespace transfers - """ - # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), - set_replication = True - ) - src_pg.slow_start() - tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') - self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path, cfs=True) - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") - src_query_result = src_pg.table_checksum("ultimate_question") - src_pg.safe_psql( - "postgres", - "CHECKPOINT") - - # do full catchup with tablespace mapping - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', - '-p', str(src_pg.port), - '--stream', - '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) - ] - ) - - # 1st check: compare data directories - self.compare_pgdata( - self.pgdata_content(src_pg.data_dir), - self.pgdata_content(dst_pg.data_dir) - ) - - # check cfm size - cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # make changes in master tablespace - src_pg.safe_psql( - "postgres", - "UPDATE ultimate_question SET answer = -1") - src_pg.safe_psql( - "postgres", - "CHECKPOINT") - - # run&recover catchup'ed instance - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - - # 2nd check: run verification query - dst_query_result = dst_pg.table_checksum("ultimate_question") - 
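The catchup test in this hunk checks, after the FULL catchup here and again after the DELTA catchup below, that every .cfm file copied to the destination fits into a single 4096-byte block. A minimal standalone sketch of that invariant, assuming only the find_by_extensions helper that this module already imports from .helpers.cfs_helpers; the helper name assert_cfm_truncated is hypothetical and not part of the patch:

import os

from .helpers.cfs_helpers import find_by_extensions  # same import as this test module


def assert_cfm_truncated(data_dir, limit=4096):
    # Collect every compressed-file map (.cfm) under the data directory.
    cfms = find_by_extensions([data_dir], ['.cfm'])
    assert cfms, "ERROR: .cfm files not found in {0}".format(data_dir)
    for cfm in cfms:
        size = os.stat(cfm).st_size
        # A properly truncated .cfm must not exceed one 4096-byte block.
        assert size <= limit, \
            "ERROR: {0} is not truncated (has size {1} > {2})".format(cfm, size, limit)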
self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') - - # and now delta backup - dst_pg.stop() - - self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', - '-p', str(src_pg.port), - '--stream', - '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) - ] - ) - - # check cfm size again - cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # run&recover catchup'ed instance - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - - - # 3rd check: run verification query - src_query_result = src_pg.table_checksum("ultimate_question") - dst_query_result = dst_pg.table_checksum("ultimate_question") - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py deleted file mode 100644 index 2fa35e71a..000000000 --- a/tests/cfs_restore_test.py +++ /dev/null @@ -1,447 +0,0 @@ -""" -restore - Syntax: - - pg_probackup restore -B backupdir --instance instance_name - [-D datadir] - [ -i backup_id | [{--time=time | --xid=xid | --lsn=lsn } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR] - [-j num_threads] [--progress] [-q] [-v] - -""" -import os -import unittest -import shutil - -from .helpers.cfs_helpers import find_by_name -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' -tblspace_name_new = 'cfs_tblspace_new' - - -class CfsRestoreBase(ProbackupTest, unittest.TestCase): - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def setUp(self): - self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ -# 'ptrack_enable': 'on', - 'cfs_encryption': 'off', - } - ) - - self.init_pb(self.backup_dir) - self.add_instance(self.backup_dir, 'node', self.node) - self.set_archiving(self.backup_dir, 'node', self.node) - - self.node.slow_start() - self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) - - self.add_data_in_cluster() - - self.backup_id = None - try: - self.backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - def add_data_in_cluster(self): - pass - - -class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_empty_tablespace_from_fullbackup(self): - """ - Case: Restore empty tablespace from valid full backup. - """ - self.node.stop(["-m", "immediate"]) - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) - except ProbackupException as e: - self.fail( - "ERROR: Restore failed. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ["pg_compression"]), - "ERROR: Restored data is not valid. pg_compression not found in tablespace dir." - ) - - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - tblspace = self.node.safe_psql( - "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) - ).decode("UTF-8") - self.assertTrue( - tblspace_name in tblspace and "compression=true" in tblspace, - "ERROR: The tablespace not restored or it restored without compressions" - ) - - -class CfsRestoreNoencTest(CfsRestoreBase): - def add_data_in_cluster(self): - self.node.safe_psql( - "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,1e5) i'.format('t1', tblspace_name) - ) - self.table_t1 = self.node.table_checksum("t1") - - # --- Restore from full backup ---# - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location(self): - """ - Case: Restore instance from valid full backup to old location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in tablespace dir" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_3_jobs(self): - """ - Case: Restore instance from valid full backup to old location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id, options=['-j', '3']) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_new_location(self): - """ - Case: Restore instance from valid full backup to new location. 
- """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) - node_new.cleanup() - - try: - self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id) - self.set_auto_conf(node_new, {'port': node_new.port}) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - node_new.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - node_new.table_checksum("t1"), - self.table_t1 - ) - node_new.cleanup() - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_new_location_5_jobs(self): - """ - Case: Restore instance from valid full backup to new location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) - node_new.cleanup() - - try: - self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id, options=['-j', '5']) - self.set_auto_conf(node_new, {'port': node_new.port}) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - node_new.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - node_new.table_checksum("t1"), - self.table_t1 - ) - node_new.cleanup() - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) - - try: - self.restore_node( - self.backup_dir, - 'node', self.node, - backup_id=self.backup_id, - options=["-T", "{0}={1}".format( - self.get_tblspace_path(self.node, tblspace_name), - self.get_tblspace_path(self.node, tblspace_name_new) - ) - ] - ) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), - "ERROR: File pg_compression not found in new tablespace location" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self): - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) - - try: - self.restore_node( - self.backup_dir, - 'node', self.node, - backup_id=self.backup_id, - options=["-j", "3", "-T", "{0}={1}".format( - self.get_tblspace_path(self.node, tblspace_name), - self.get_tblspace_path(self.node, tblspace_name_new) - ) - ] - ) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), - "ERROR: File pg_compression not found in new tablespace location" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_fullbackup_to_new_location_tablespace_new_location(self): - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_fullbackup_to_new_location_tablespace_new_location_5_jobs(self): - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack(self): - """ - Case: Restore from backup to old location - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack_jobs(self): - """ - Case: Restore from backup to old location, four jobs - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack_new_jobs(self): - pass - -# --------------------------------------------------------- # - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page(self): - """ - Case: Restore from backup to old location - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page_jobs(self): - """ - Case: Restore from backup to old location, four jobs - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page_new_jobs(self): - """ - Case: Restore from backup to new location, four jobs - """ - pass - - -#class CfsRestoreEncEmptyTablespaceTest(CfsRestoreNoencEmptyTablespaceTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsRestoreNoencEmptyTablespaceTest, self).setUp() -# -# -#class CfsRestoreEncTest(CfsRestoreNoencTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsRestoreNoencTest, self).setUp() diff --git a/tests/cfs_validate_backup_test.py b/tests/cfs_validate_backup_test.py deleted file mode 100644 index 343020dfc..000000000 --- a/tests/cfs_validate_backup_test.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import unittest -import random - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, 
ProbackupException - -tblspace_name = 'cfs_tblspace' - - -class CfsValidateBackupNoenc(ProbackupTest,unittest.TestCase): - def setUp(self): - pass - - def test_validate_fullbackup_empty_tablespace_after_delete_pg_compression(self): - pass - - def tearDown(self): - pass - - -#class CfsValidateBackupNoenc(CfsValidateBackupNoenc): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsValidateBackupNoenc).setUp() diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index eb46aea19..252b8757e 100644 --- a/tests/checkdb_test.py +++ b/tests/checkdb_test.py @@ -1,29 +1,23 @@ import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -import subprocess +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb from testgres import QueryException -import shutil -import sys -import time +from parameterized import parameterized +from .helpers.data_helpers import corrupt_data_file, validate_data_file -class CheckdbTest(ProbackupTest, unittest.TestCase): +class CheckdbTest(ProbackupTest): # @unittest.skip("skip") + @needs_gdb def test_checkdb_amcheck_only_sanity(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -34,14 +28,14 @@ def test_checkdb_amcheck_only_sanity(self): node.safe_psql( "postgres", "create index on t_heap(id)") - + node.safe_psql( "postgres", "create table idxpart (a int) " "partition by range (a)") # there aren't partitioned indexes on 10 and lesser versions - if self.get_version(node) >= 110000: + if self.pg_config_version >= 110000: node.safe_psql( "postgres", "create index on idxpart(a)") @@ -55,29 +49,14 @@ def test_checkdb_amcheck_only_sanity(self): "postgres", "create extension amcheck_next") - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - # simple sanity - try: - self.checkdb_node( - options=['--skip-block-validation']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because --amcheck option is missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Option '--skip-block-validation' must be " - "used with '--amcheck' option", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.checkdb_node(options=['--skip-block-validation'], + expect_error="because --amcheck options is missing") + self.assertMessage(contains="ERROR: Option '--skip-block-validation' must be " + "used with '--amcheck' option") # simple sanity - output = self.checkdb_node( + output = self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', @@ -91,56 +70,37 @@ def test_checkdb_amcheck_only_sanity(self): output) # logging to file sanity - try: - self.checkdb_node( + self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', '--log-level-file=verbose', - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to 
happen - self.assertEqual( - 1, 0, - "Expecting Error because log_directory missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( + '-d', 'postgres', '-p', str(node.port)], + skip_log_directory=True, + expect_error="because log_directory missing") + self.assertMessage(contains= "ERROR: Cannot save checkdb logs to a file. " "You must specify --log-directory option when " - "running checkdb with --log-level-file option enabled", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + "running checkdb with --log-level-file option enabled") # If backup_dir provided, then instance name must be # provided too - try: - self.checkdb_node( - backup_dir, + self.pb.checkdb_node( + use_backup_dir=True, options=[ '--amcheck', '--skip-block-validation', '--log-level-file=verbose', - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because log_directory missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Required parameter not specified: --instance", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + '-d', 'postgres', '-p', str(node.port)], + expect_error="because instance missing" + ) + self.assertMessage(contains="ERROR: Required parameter not specified: --instance") # checkdb can use default or set in config values, # if backup_dir and instance name are provided - self.checkdb_node( - backup_dir, - 'node', + self.pb.checkdb_node( + use_backup_dir=True, + instance='node', options=[ '--amcheck', '--skip-block-validation', @@ -148,49 +108,41 @@ def test_checkdb_amcheck_only_sanity(self): '-d', 'postgres', '-p', str(node.port)]) # check that file present and full of messages - os.path.isfile(log_file_path) - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - 'INFO: checkdb --amcheck finished successfully', - log_file_content) - self.assertIn( - 'VERBOSE: (query)', - log_file_content) - os.unlink(log_file_path) + log_file_content = self.read_pb_log() + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + log_file_content) + self.assertIn( + 'VERBOSE: (query)', + log_file_content) + self.unlink_pg_log() # log-level-file and log-directory are provided - self.checkdb_node( - backup_dir, - 'node', + self.pb.checkdb_node( + use_backup_dir=True, + instance='node', options=[ '--amcheck', '--skip-block-validation', '--log-level-file=verbose', - '--log-directory={0}'.format( - os.path.join(backup_dir, 'log')), '-d', 'postgres', '-p', str(node.port)]) # check that file present and full of messages - os.path.isfile(log_file_path) - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - 'INFO: checkdb --amcheck finished successfully', - log_file_content) - self.assertIn( - 'VERBOSE: (query)', - log_file_content) - os.unlink(log_file_path) - - gdb = self.checkdb_node( + log_file_content = self.read_pb_log() + self.assertIn( + 'INFO: checkdb --amcheck finished successfully', + log_file_content) + self.assertIn( + 'VERBOSE: (query)', + log_file_content) + self.unlink_pg_log() + + gdb = self.pb.checkdb_node( gdb=True, options=[ '--amcheck', '--skip-block-validation', '--log-level-file=verbose', - '--log-directory={0}'.format( - os.path.join(backup_dir, 'log')), '-d', 'postgres', '-p', 
str(node.port)]) gdb.set_breakpoint('amcheck_one_index') @@ -200,23 +152,20 @@ def test_checkdb_amcheck_only_sanity(self): "postgres", "drop table t_heap") - gdb.remove_all_breakpoints() - gdb.continue_execution_until_exit() # check that message about missing index is present - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - 'ERROR: checkdb --amcheck finished with failure', - log_file_content) - self.assertIn( - "WARNING: Thread [1]. Amcheck failed in database 'postgres' " - "for index: 'public.t_heap_id_idx':", - log_file_content) - self.assertIn( - 'ERROR: could not open relation with OID', - log_file_content) + log_file_content = self.read_pb_log() + self.assertIn( + 'ERROR: checkdb --amcheck finished with failure', + log_file_content) + self.assertIn( + "Amcheck failed in database 'postgres' " + "for index: 'public.t_heap_id_idx':", + log_file_content) + self.assertIn( + 'ERROR: could not open relation with OID', + log_file_content) # Clean after yourself gdb.kill() @@ -225,14 +174,12 @@ def test_checkdb_amcheck_only_sanity(self): # @unittest.skip("skip") def test_basic_checkdb_amcheck_only_sanity(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # create two databases @@ -276,24 +223,13 @@ def test_basic_checkdb_amcheck_only_sanity(self): "db2", "select pg_relation_filepath('some_index')").decode('utf-8').rstrip()) - try: - self.checkdb_node( + self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because some db was not amchecked" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Some databases were not amchecked", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + '-d', 'postgres', '-p', str(node.port)], + expect_error="because some db was not amchecked") + self.assertMessage(contains="ERROR: Some databases were not amchecked") node.stop() @@ -302,59 +238,37 @@ def test_basic_checkdb_amcheck_only_sanity(self): f.seek(42000) f.write(b"blablahblahs") f.flush() - f.close with open(index_path_2, "rb+", 0) as f: f.seek(42000) f.write(b"blablahblahs") f.flush() - f.close node.slow_start() - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') - - try: - self.checkdb_node( + self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', '--log-level-file=verbose', - '--log-directory={0}'.format( - os.path.join(backup_dir, 'log')), - '-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because some db was not amchecked" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: checkdb --amcheck finished with failure", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + 
'-d', 'postgres', '-p', str(node.port)], + expect_error="because some file checks failed") + self.assertMessage(contains="ERROR: checkdb --amcheck finished with failure") # corruption of both indexes in db1 and db2 must be detected # also the that amcheck is not installed in 'postgres' # should be logged - with open(log_file_path) as f: - log_file_content = f.read() - self.assertIn( - "WARNING: Thread [1]. Amcheck failed in database 'db1' " - "for index: 'public.pgbench_accounts_pkey':", - log_file_content) - - self.assertIn( - "WARNING: Thread [1]. Amcheck failed in database 'db2' " - "for index: 'public.some_index':", - log_file_content) - - self.assertIn( - "ERROR: checkdb --amcheck finished with failure", - log_file_content) + log_file_content = self.read_pb_log() + self.assertMessage(log_file_content, contains= + "Amcheck failed in database 'db1' " + "for index: 'public.pgbench_accounts_pkey':") + + self.assertMessage(log_file_content, contains= + "Amcheck failed in database 'db2' " + "for index: 'public.some_index':") + + self.assertMessage(log_file_content, contains= + "ERROR: checkdb --amcheck finished with failure") # Clean after yourself node.stop() @@ -362,15 +276,11 @@ def test_basic_checkdb_amcheck_only_sanity(self): # @unittest.skip("skip") def test_checkdb_block_validation_sanity(self): """make node, corrupt some pages, check that checkdb failed""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -387,27 +297,14 @@ def test_checkdb_block_validation_sanity(self): "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # sanity - try: - self.checkdb_node() - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because pgdata must be specified\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.checkdb_node( + self.pb.checkdb_node(expect_error="because pgdata must be specified") + self.assertMessage(contains="No postgres data directory specified.\nPlease specify it either using environment variable PGDATA or\ncommand line option --pgdata (-D)") + + self.pb.checkdb_node( data_dir=node.data_dir, options=['-d', 'postgres', '-p', str(node.port)]) - self.checkdb_node( - backup_dir, 'node', + self.pb.checkdb_node(use_backup_dir=True, instance='node', options=['-d', 'postgres', '-p', str(node.port)]) heap_full_path = os.path.join(node.data_dir, heap_path) @@ -416,50 +313,78 @@ def test_checkdb_block_validation_sanity(self): f.seek(9000) f.write(b"bla") f.flush() - f.close with open(heap_full_path, "rb+", 0) as f: f.seek(42000) f.write(b"bla") f.flush() - f.close - try: - self.checkdb_node( - backup_dir, 'node', - options=['-d', 'postgres', '-p', str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of data corruption\n" - " Output: {0} \n CMD: {1}".format( - 
repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Checkdb failed", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Corruption detected in file "{0}", block 1'.format( - os.path.normpath(heap_full_path)), - e.message) - - self.assertIn( - 'WARNING: Corruption detected in file "{0}", block 5'.format( - os.path.normpath(heap_full_path)), - e.message) + self.pb.checkdb_node(use_backup_dir=True, instance='node', + options=['-d', 'postgres', '-p', str(node.port)], + expect_error="because of data corruption") + self.assertMessage(contains="ERROR: Checkdb failed") + self.assertMessage(contains='WARNING: Corruption detected in file "{0}", block 1'.format( + os.path.normpath(heap_full_path))) + + self.assertMessage(contains='WARNING: Corruption detected in file "{0}", block 5'.format( + os.path.normpath(heap_full_path))) # Clean after yourself node.stop() + # @unittest.skip("skip") + @parameterized.expand(("vm", "fsm")) + def test_checkdb_nondatafile_validation(self, fork_kind): + """make node, corrupt vm file, check that checkdb failed""" + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select 1 as id, md5(i::text) as text, " + "md5(repeat(i::text,10))::tsvector as tsvector " + "from generate_series(0,1000) i") + node.safe_psql( + "postgres", + "CHECKPOINT;") + + node.safe_psql( + "postgres", + "VACUUM t_heap;") + + node.safe_psql( + "postgres", + "CHECKPOINT;") + + heap_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + heap_path += "_" + fork_kind + + self.pb.checkdb_node(use_backup_dir=True, instance='node', + options=['-d', 'postgres', '-p', str(node.port)]) + + heap_full_path = os.path.join(node.data_dir, heap_path) + self.assertTrue(os.path.exists(heap_full_path)) + + self.assertTrue(validate_data_file(heap_full_path)) + self.assertTrue(corrupt_data_file(heap_full_path), "corrupting file error") + + self.pb.checkdb_node(use_backup_dir=True, instance='node', + options=['-d', 'postgres', '-p', str(node.port)], + expect_error="because of data corruption") + self.assertMessage(contains="ERROR: Checkdb failed") + self.assertMessage(contains='WARNING: Corruption detected in file "{0}"'.format( + os.path.normpath(heap_full_path))) + def test_checkdb_checkunique(self): """Test checkunique parameter of amcheck.bt_index_check function""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') node.slow_start() try: @@ -494,7 +419,7 @@ def test_checkdb_checkunique(self): "DELETE FROM bttest_unique WHERE ctid::text='(9,3)';") # run without checkunique option (error will not detected) - output = self.checkdb_node( + output = self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', @@ -508,54 +433,48 @@ def test_checkdb_checkunique(self): output) # run with checkunique option - try: - self.checkdb_node( + if (ProbackupTest.enterprise and + (self.pg_config_version >= 111300 and self.pg_config_version < 120000 + or self.pg_config_version >= 120800 and self.pg_config_version < 130000 + or self.pg_config_version >= 130400 and self.pg_config_version < 160000 + or 
self.pg_config_version > 160000)): + self.pb.checkdb_node( + options=[ + '--amcheck', + '--skip-block-validation', + '--checkunique', + '-d', 'postgres', '-p', str(node.port)], + expect_error="because of index corruption") + self.assertMessage(contains= + "ERROR: checkdb --amcheck finished with failure. Not all checked indexes are valid. All databases were amchecked.") + + self.assertMessage(contains= + "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx'") + + self.assertMessage(regex= + r"ERROR:[^\n]*(violating UNIQUE constraint|uniqueness is violated)") + else: + self.pb.checkdb_node( options=[ '--amcheck', '--skip-block-validation', '--checkunique', '-d', 'postgres', '-p', str(node.port)]) - if (ProbackupTest.enterprise and - (self.get_version(node) >= 111300 and self.get_version(node) < 120000 - or self.get_version(node) >= 120800 and self.get_version(node) < 130000 - or self.get_version(node) >= 130400)): - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of index corruption\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - else: - self.assertRegex( - self.output, - r"WARNING: Extension 'amcheck(|_next)' version [\d.]* in schema 'public' do not support 'checkunique' parameter") - except ProbackupException as e: - self.assertIn( - "ERROR: checkdb --amcheck finished with failure. Not all checked indexes are valid. All databases were amchecked.", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx': ERROR: index \"bttest_unique_idx\" is corrupted. There are tuples violating UNIQUE constraint", - e.message) + self.assertMessage(regex= + r"WARNING: Extension 'amcheck(|_next)' version [\d.]* in schema 'public' do not support 'checkunique' parameter") # Clean after yourself node.stop() # @unittest.skip("skip") + @needs_gdb def test_checkdb_sigint_handling(self): """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() try: @@ -568,8 +487,7 @@ def test_checkdb_sigint_handling(self): "create extension amcheck_next") # FULL backup - gdb = self.checkdb_node( - backup_dir, 'node', gdb=True, + gdb = self.pb.checkdb_node(use_backup_dir=True, instance='node', gdb=True, options=[ '-d', 'postgres', '-j', '2', '--skip-block-validation', @@ -578,11 +496,9 @@ def test_checkdb_sigint_handling(self): gdb.set_breakpoint('amcheck_one_index') gdb.run_until_break() - gdb.continue_execution_until_break(20) - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') + gdb.signal('SIGINT') gdb.continue_execution_until_error() with open(node.pg_log_file, 'r') as f: @@ -599,13 +515,11 @@ def test_checkdb_sigint_handling(self): # @unittest.skip("skip") def test_checkdb_with_least_privileges(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + 
backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -637,66 +551,8 @@ def test_checkdb_with_least_privileges(self): "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') # amcheck-next function - - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION 
pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' -# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') - # PG 10 - elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + if self.pg_config_version < 110000: node.safe_psql( 'backupdb', 'CREATE ROLE backup WITH LOGIN; ' @@ -722,20 +578,15 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;') - + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;' + ) if ProbackupTest.enterprise: # amcheck-1.1 node.safe_psql( 'backupdb', 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup') - else: - # amcheck-1.0 - node.safe_psql( - 'backupdb', - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup') # >= 11 < 14 - elif self.get_version(node) > 110000 and self.get_version(node) < 140000: + elif self.pg_config_version > 110000 and self.pg_config_version < 140000: node.safe_psql( 'backupdb', 'CREATE ROLE backup WITH LOGIN; ' @@ -762,13 +613,13 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') - + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + ) # checkunique parameter if ProbackupTest.enterprise: - if (self.get_version(node) >= 111300 and self.get_version(node) < 120000 - or self.get_version(node) >= 120800 and self.get_version(node) < 130000 - or self.get_version(node) >= 130400): + if (self.pg_config_version >= 111300 and self.pg_config_version < 120000 + or self.pg_config_version >= 120800 and self.pg_config_version < 130000 + or self.pg_config_version >= 130400): node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") @@ -800,52 +651,30 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') - + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' + ) # checkunique parameter - if ProbackupTest.enterprise: + if ProbackupTest.enterprise and self.pg_config_version != 160000: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") if ProbackupTest.pgpro: node.safe_psql( - 'backupdb', - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # checkdb - try: - self.checkdb_node( - backup_dir, 'node', - options=[ - '--amcheck', '-U', 'backup', - '-d', 'backupdb', '-p', 
str(node.port)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because permissions are missing\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "INFO: Amcheck succeeded for database 'backupdb'", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WARNING: Extension 'amcheck' or 'amcheck_next' are " - "not installed in database postgres", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "ERROR: Some databases were not amchecked", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.checkdb_node(use_backup_dir=True, instance='node', + options=[ + '--amcheck', '-U', 'backup', + '-d', 'backupdb', '-p', str(node.port)], + expect_error="because permissions are missing") + self.assertMessage(contains="INFO: Amcheck succeeded for database 'backupdb'") + self.assertMessage(contains="WARNING: Extension 'amcheck' or 'amcheck_next' " + "are not installed in database postgres") + self.assertMessage(contains="ERROR: Some databases were not amchecked") # Clean after yourself node.stop() diff --git a/tests/compatibility_test.py b/tests/compatibility_test.py index 7ae8baf9f..3fa005a21 100644 --- a/tests/compatibility_test.py +++ b/tests/compatibility_test.py @@ -1,7 +1,7 @@ import unittest import subprocess import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class from sys import exit import shutil @@ -14,7 +14,9 @@ def check_ssh_agent_path_exists(): return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ -class CrossCompatibilityTest(ProbackupTest, unittest.TestCase): +class CrossCompatibilityTest(ProbackupTest): + auto_compress_alg = False + @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test') @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist') # @unittest.skip("skip") @@ -44,8 +46,7 @@ def test_catchup_with_different_remote_major_pg(self): # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/' pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH'] - src_pg = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'src'), + src_pg = self.pg_node.make_simple('src', set_replication=True, ) src_pg.slow_start() @@ -54,8 +55,8 @@ def test_catchup_with_different_remote_major_pg(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # do full catchup - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - self.catchup_node( + dst_pg = self.pg_node.make_empty('dst') + self.pb.catchup_node( backup_mode='FULL', source_pgdata=src_pg.data_dir, destination_node=dst_pg, @@ -63,7 +64,7 @@ def test_catchup_with_different_remote_major_pg(self): ) dst_options = {'port': str(dst_pg.port)} - self.set_auto_conf(dst_pg, dst_options) + dst_pg.set_auto_conf(dst_options) dst_pg.slow_start() dst_pg.stop() @@ -73,7 +74,7 @@ def test_catchup_with_different_remote_major_pg(self): # do delta catchup with remote pg_probackup agent with another postgres major version # this DELTA backup should fail without PBCKP-236 patch. 
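# Illustrative sketch of the error-expectation style these hunks migrate to. The old form
# wrapped the call in try/except ProbackupException and grepped e.message with assertIn;
# the new form declares the expected failure with expect_error= and checks the captured
# output with self.assertMessage(). The helper names, options and messages below are copied
# from the hunks above; the surrounding ProbackupTest fixture (initialized catalog, running
# node with a corrupted unique index) is assumed rather than shown.
def _expect_error_sketch(self, node):
    # expect_error= presumably makes the helper fail the test itself if the command
    # unexpectedly succeeds, replacing the old manual self.assertEqual(1, 0, ...) trick
    self.pb.checkdb_node(
        options=['--amcheck', '--skip-block-validation', '--checkunique',
                 '-d', 'postgres', '-p', str(node.port)],
        expect_error="because of index corruption")
    # output assertions become declarative; both contains= and regex= forms appear in this diff
    self.assertMessage(contains="ERROR: checkdb --amcheck finished with failure. "
                                "Not all checked indexes are valid. All databases were amchecked.")
    self.assertMessage(regex=r"ERROR:[^\n]*(violating UNIQUE constraint|uniqueness is violated)")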
- self.catchup_node( + self.pb.catchup_node( backup_mode='DELTA', source_pgdata=src_pg.data_dir, destination_node=dst_pg, @@ -82,53 +83,48 @@ def test_catchup_with_different_remote_major_pg(self): ) -class CompatibilityTest(ProbackupTest, unittest.TestCase): +class CompatibilityTest(ProbackupTest): def setUp(self): super().setUp() if not self.probackup_old_path: - self.skipTest('PGPROBACKUPBIN_OLD is not set') + self.skipTest('An old version of pg_probackup is not set up') # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) + self.pb.init(old_binary=True) + self.pb.show() - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) + self.pb.add_instance('node', node, old_binary=True) + self.pb.show() - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=10) # FULL backup with old binary - self.backup_node( - backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - self.show_pb(backup_dir) + self.pb.show() - self.validate_pb(backup_dir) + self.pb.validate() # RESTORE old FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -143,16 +139,14 @@ def test_backward_compatibility_page(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, backup_type='page', + self.pb.backup_node('node', node, backup_type='page', old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -167,16 +161,14 @@ def test_backward_compatibility_page(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -194,13 +186,12 @@ def test_backward_compatibility_page(self): 'postgres', 'VACUUM') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - 
self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -209,13 +200,12 @@ def test_backward_compatibility_page(self): 'postgres', 'insert into pgbench_accounts select * from pgbench_accounts') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -224,42 +214,37 @@ def test_backward_compatibility_page(self): # @unittest.skip("skip") def test_backward_compatibility_delta(self): """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) + self.pb.init(old_binary=True) + self.pb.show() - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) + self.pb.add_instance('node', node, old_binary=True) + self.pb.show() - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=10) # FULL backup with old binary - self.backup_node( - backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - self.show_pb(backup_dir) + self.pb.show() - self.validate_pb(backup_dir) + self.pb.validate() # RESTORE old FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -274,16 +259,14 @@ def test_backward_compatibility_delta(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -298,15 +281,14 @@ def test_backward_compatibility_delta(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', 
node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -324,13 +306,12 @@ def test_backward_compatibility_delta(self): 'postgres', 'VACUUM') - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -339,13 +320,12 @@ def test_backward_compatibility_delta(self): 'postgres', 'insert into pgbench_accounts select * from pgbench_accounts') - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -358,20 +338,18 @@ def test_backward_compatibility_ptrack(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) + self.pb.init(old_binary=True) + self.pb.show() - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) + self.pb.add_instance('node', node, old_binary=True) + self.pb.show() - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.safe_psql( @@ -381,24 +359,21 @@ def test_backward_compatibility_ptrack(self): node.pgbench_init(scale=10) # FULL backup with old binary - self.backup_node( - backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - self.show_pb(backup_dir) + self.pb.show() - self.validate_pb(backup_dir) + self.pb.validate() # RESTORE old FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -413,16 +388,14 @@ def test_backward_compatibility_ptrack(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "--recovery-target=latest", @@ -441,16 +414,14 @@ def 
test_backward_compatibility_ptrack(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack') + self.pb.backup_node('node', node, backup_type='ptrack') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "--recovery-target=latest", @@ -464,23 +435,20 @@ def test_backward_compatibility_ptrack(self): # @unittest.skip("skip") def test_backward_compatibility_compression(self): """Description in jira issue PGPRO-434""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=10) # FULL backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, old_binary=True, options=['--compress']) @@ -488,13 +456,11 @@ def test_backward_compatibility_compression(self): pgdata = self.pgdata_content(node.data_dir) # restore OLD FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: @@ -509,8 +475,7 @@ def test_backward_compatibility_compression(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) @@ -519,8 +484,7 @@ def test_backward_compatibility_compression(self): pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: @@ -535,8 +499,7 @@ def test_backward_compatibility_compression(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', options=['--compress']) @@ -545,8 +508,7 @@ def test_backward_compatibility_compression(self): node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: @@ -554,10 +516,9 @@ def test_backward_compatibility_compression(self): self.compare_pgdata(pgdata, pgdata_restored) # Delta backup with old binary - self.delete_pb(backup_dir, 'node', backup_id) + self.pb.delete('node', backup_id) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, old_binary=True, options=['--compress']) @@ -569,8 +530,7 @@ def test_backward_compatibility_compression(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', 
node, backup_type='delta', options=['--compress'], old_binary=True) @@ -580,8 +540,7 @@ def test_backward_compatibility_compression(self): node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: @@ -597,8 +556,7 @@ def test_backward_compatibility_compression(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=['--compress']) @@ -607,8 +565,7 @@ def test_backward_compatibility_compression(self): node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: @@ -616,80 +573,70 @@ def test_backward_compatibility_compression(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_backward_compatibility_merge(self): """ Create node, take FULL and PAGE backups with old binary, merge them with new binary """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() # FULL backup with OLD binary - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, old_binary=True) node.pgbench_init(scale=1) # PAGE backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='page', old_binary=True) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - self.merge_backup(backup_dir, "node", backup_id) + self.pb.merge_backup("node", backup_id) - self.show_pb(backup_dir, as_text=True, as_json=False) + self.pb.show(as_text=True, as_json=False) # restore OLD FULL with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_backward_compatibility_merge_1(self): """ Create node, take FULL and PAGE backups with old binary, merge them with new binary. 
old binary version =< 2.2.7 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=20) # FULL backup with OLD binary - self.backup_node(backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -699,8 +646,7 @@ def test_backward_compatibility_merge_1(self): pgbench.stdout.close() # PAGE1 backup with OLD binary - self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) + self.pb.backup_node('node', node, backup_type='page', old_binary=True) node.safe_psql( 'postgres', @@ -711,13 +657,12 @@ def test_backward_compatibility_merge_1(self): 'VACUUM pgbench_accounts') # PAGE2 backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) + backup_id = self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata = self.pgdata_content(node.data_dir) # merge chain created by old binary with new binary - output = self.merge_backup(backup_dir, "node", backup_id) + output = self.pb.merge_backup("node", backup_id) # check that in-place is disabled self.assertIn( @@ -725,33 +670,29 @@ def test_backward_compatibility_merge_1(self): "because of storage format incompatibility", output) # restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_backward_compatibility_merge_2(self): """ Create node, take FULL and PAGE backups with old binary, merge them with new binary. 
old binary version =< 2.2.7 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=50) @@ -760,11 +701,10 @@ def test_backward_compatibility_merge_2(self): 'postgres', 'VACUUM pgbench_accounts') - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') # FULL backup with OLD binary - self.backup_node(backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -774,8 +714,7 @@ def test_backward_compatibility_merge_2(self): pgbench.stdout.close() # PAGE1 backup with OLD binary - page1 = self.backup_node( - backup_dir, 'node', node, + page1 = self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata1 = self.pgdata_content(node.data_dir) @@ -785,15 +724,13 @@ def test_backward_compatibility_merge_2(self): "DELETE from pgbench_accounts where ctid > '(10,1)'") # PAGE2 backup with OLD binary - page2 = self.backup_node( - backup_dir, 'node', node, + page2 = self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata2 = self.pgdata_content(node.data_dir) # PAGE3 backup with OLD binary - page3 = self.backup_node( - backup_dir, 'node', node, + page3 = self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata3 = self.pgdata_content(node.data_dir) @@ -806,70 +743,64 @@ def test_backward_compatibility_merge_2(self): pgbench.stdout.close() # PAGE4 backup with NEW binary - page4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page4 = self.pb.backup_node('node', node, backup_type='page') pgdata4 = self.pgdata_content(node.data_dir) # merge backups one by one and check data correctness # merge PAGE1 - self.merge_backup( - backup_dir, "node", page1, options=['--log-level-file=VERBOSE']) + self.pb.merge_backup("node", page1, options=['--log-level-file=VERBOSE']) # check data correctness for PAGE1 node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, backup_id=page1, + self.pb.restore_node('node', node_restored, backup_id=page1, options=['--log-level-file=VERBOSE']) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) # merge PAGE2 - self.merge_backup(backup_dir, "node", page2) + self.pb.merge_backup("node", page2) # check data correctness for PAGE2 node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page2) + self.pb.restore_node('node', node=node_restored, backup_id=page2) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata2, pgdata_restored) # merge PAGE3 - self.show_pb(backup_dir, 'node', page3) - self.merge_backup(backup_dir, "node", page3) - self.show_pb(backup_dir, 'node', page3) + self.pb.show('node', page3) + self.pb.merge_backup("node", page3) 
+ self.pb.show('node', page3) # check data correctness for PAGE3 node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page3) + self.pb.restore_node('node', node=node_restored, backup_id=page3) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) # merge PAGE4 - self.merge_backup(backup_dir, "node", page4) + self.pb.merge_backup("node", page4) # check data correctness for PAGE4 node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page4) + self.pb.restore_node('node', node_restored, backup_id=page4) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata4, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_backward_compatibility_merge_3(self): """ Create node, take FULL and PAGE backups with old binary, merge them with new binary. old binary version =< 2.2.7 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=50) @@ -878,12 +809,10 @@ def test_backward_compatibility_merge_3(self): 'postgres', 'VACUUM pgbench_accounts') - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') # FULL backup with OLD binary - self.backup_node( - backup_dir, 'node', node, old_binary=True, options=['--compress']) + self.pb.backup_node('node', node, old_binary=True, options=['--compress']) pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -893,8 +822,7 @@ def test_backward_compatibility_merge_3(self): pgbench.stdout.close() # PAGE1 backup with OLD binary - page1 = self.backup_node( - backup_dir, 'node', node, + page1 = self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) pgdata1 = self.pgdata_content(node.data_dir) @@ -904,15 +832,13 @@ def test_backward_compatibility_merge_3(self): "DELETE from pgbench_accounts where ctid > '(10,1)'") # PAGE2 backup with OLD binary - page2 = self.backup_node( - backup_dir, 'node', node, + page2 = self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) pgdata2 = self.pgdata_content(node.data_dir) # PAGE3 backup with OLD binary - page3 = self.backup_node( - backup_dir, 'node', node, + page3 = self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) pgdata3 = self.pgdata_content(node.data_dir) @@ -925,54 +851,50 @@ def test_backward_compatibility_merge_3(self): pgbench.stdout.close() # PAGE4 backup with NEW binary - page4 = self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--compress']) + page4 = self.pb.backup_node('node', node, backup_type='page', options=['--compress']) pgdata4 = self.pgdata_content(node.data_dir) # merge backups one by one and check data correctness # merge PAGE1 - self.merge_backup( - 
backup_dir, "node", page1, options=['--log-level-file=VERBOSE']) + self.pb.merge_backup("node", page1, options=['--log-level-file=VERBOSE']) # check data correctness for PAGE1 node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, backup_id=page1, + self.pb.restore_node('node', node_restored, backup_id=page1, options=['--log-level-file=VERBOSE']) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) # merge PAGE2 - self.merge_backup(backup_dir, "node", page2) + self.pb.merge_backup("node", page2) # check data correctness for PAGE2 node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page2) + self.pb.restore_node('node', node_restored, backup_id=page2) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata2, pgdata_restored) # merge PAGE3 - self.show_pb(backup_dir, 'node', page3) - self.merge_backup(backup_dir, "node", page3) - self.show_pb(backup_dir, 'node', page3) + self.pb.show('node', page3) + self.pb.merge_backup("node", page3) + self.pb.show('node', page3) # check data correctness for PAGE3 node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page3) + self.pb.restore_node('node', node_restored, backup_id=page3) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) # merge PAGE4 - self.merge_backup(backup_dir, "node", page4) + self.pb.merge_backup("node", page4) # check data correctness for PAGE4 node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored, backup_id=page4) + self.pb.restore_node('node', node_restored, backup_id=page4) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata4, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_backward_compatibility_merge_4(self): """ Start merge between minor version, crash and retry it. 
@@ -982,16 +904,14 @@ def test_backward_compatibility_merge_4(self): self.assertTrue( False, 'You need pg_probackup old_binary =< 2.4.0 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=20) @@ -1000,12 +920,10 @@ def test_backward_compatibility_merge_4(self): 'postgres', 'VACUUM pgbench_accounts') - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') # FULL backup with OLD binary - self.backup_node( - backup_dir, 'node', node, old_binary=True, options=['--compress']) + self.pb.backup_node('node', node, old_binary=True, options=['--compress']) pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -1015,37 +933,28 @@ def test_backward_compatibility_merge_4(self): pgbench.stdout.close() # PAGE backup with NEW binary - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--compress']) + page_id = self.pb.backup_node('node', node, backup_type='page', options=['--compress']) pgdata = self.pgdata_content(node.data_dir) # merge PAGE4 - gdb = self.merge_backup(backup_dir, "node", page_id, gdb=True) + gdb = self.pb.merge_backup("node", page_id, gdb=True) gdb.set_breakpoint('rename') gdb.run_until_break() gdb.continue_execution_until_break(500) - gdb._execute('signal SIGKILL') - - try: - self.merge_backup(backup_dir, "node", page_id) - self.assertEqual( - 1, 0, - "Expecting Error because of format changes.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, "node") + + self.pb.merge_backup("node", page_id, + expect_error="because of format changes") + self.assertMessage(contains= "ERROR: Retry of failed merge for backups with different " "between minor versions is forbidden to avoid data corruption " "because of storage format changes introduced in 2.4.0 version, " - "please take a new full backup", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + "please take a new full backup") # @unittest.expectedFailure - # @unittest.skip("skip") def test_backward_compatibility_merge_5(self): """ Create node, take FULL and PAGE backups with old binary, @@ -1060,22 +969,20 @@ def test_backward_compatibility_merge_5(self): self.version_to_num(self.old_probackup_version), self.version_to_num(self.probackup_version)) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, 
old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=20) # FULL backup with OLD binary - self.backup_node(backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -1085,8 +992,7 @@ def test_backward_compatibility_merge_5(self): pgbench.stdout.close() # PAGE1 backup with OLD binary - self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) + self.pb.backup_node('node', node, backup_type='page', old_binary=True) node.safe_psql( 'postgres', @@ -1097,13 +1003,12 @@ def test_backward_compatibility_merge_5(self): 'VACUUM pgbench_accounts') # PAGE2 backup with OLD binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) + backup_id = self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata = self.pgdata_content(node.data_dir) # merge chain created by old binary with new binary - output = self.merge_backup(backup_dir, "node", backup_id) + output = self.pb.merge_backup("node", backup_id) # check that in-place is disabled self.assertNotIn( @@ -1111,11 +1016,10 @@ def test_backward_compatibility_merge_5(self): "because of storage format incompatibility", output) # restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -1131,15 +1035,13 @@ def test_page_vacuum_truncate(self): and check data correctness old binary should be 2.2.x version """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.safe_psql( @@ -1154,7 +1056,7 @@ def test_page_vacuum_truncate(self): "postgres", "vacuum t_heap") - id1 = self.backup_node(backup_dir, 'node', node, old_binary=True) + id1 = self.pb.backup_node('node', node, old_binary=True) pgdata1 = self.pgdata_content(node.data_dir) node.safe_psql( @@ -1165,8 +1067,7 @@ def test_page_vacuum_truncate(self): "postgres", "vacuum t_heap") - id2 = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) + id2 = self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata2 = self.pgdata_content(node.data_dir) node.safe_psql( @@ -1176,47 +1077,39 @@ def test_page_vacuum_truncate(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,1) i") - id3 = self.backup_node( - backup_dir, 'node', node, backup_type='page', old_binary=True) + id3 = 
self.pb.backup_node('node', node, backup_type='page', old_binary=True) pgdata3 = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id1) + self.pb.restore_node('node', node_restored, backup_id=id1) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id2) + self.pb.restore_node('node', node_restored, backup_id=id2) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata2, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id3) + self.pb.restore_node('node', node_restored, backup_id=id3) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.cleanup() @@ -1231,15 +1124,13 @@ def test_page_vacuum_truncate_compression(self): and check data correctness old binary should be 2.2.x version """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.safe_psql( @@ -1254,8 +1145,7 @@ def test_page_vacuum_truncate_compression(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node',node, old_binary=True, options=['--compress']) + self.pb.backup_node('node',node, old_binary=True, options=['--compress']) node.safe_psql( "postgres", @@ -1265,8 +1155,7 @@ def test_page_vacuum_truncate_compression(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='page', + self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) node.safe_psql( @@ -1276,23 +1165,21 @@ def test_page_vacuum_truncate_compression(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,1) i") - self.backup_node( - backup_dir, 'node', node, backup_type='page', + self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -1306,15 +1193,13 @@ def test_page_vacuum_truncate_compressed_1(self): and check data correctness old binary should be 2.2.x version """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.set_archiving(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) + self.pb.set_archiving('node', node, old_binary=True) node.slow_start() node.safe_psql( @@ -1329,8 +1214,7 @@ def test_page_vacuum_truncate_compressed_1(self): "postgres", "vacuum t_heap") - id1 = self.backup_node( - backup_dir, 'node', node, + id1 = self.pb.backup_node('node', node, old_binary=True, options=['--compress']) pgdata1 = self.pgdata_content(node.data_dir) @@ -1342,8 +1226,7 @@ def test_page_vacuum_truncate_compressed_1(self): "postgres", "vacuum t_heap") - id2 = self.backup_node( - backup_dir, 'node', node, backup_type='page', + id2 = self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) pgdata2 = self.pgdata_content(node.data_dir) @@ -1354,48 +1237,40 @@ def test_page_vacuum_truncate_compressed_1(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,1) i") - id3 = self.backup_node( - backup_dir, 'node', node, backup_type='page', + id3 = self.pb.backup_node('node', node, backup_type='page', old_binary=True, options=['--compress']) pgdata3 = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id1) + self.pb.restore_node('node', node_restored, backup_id=id1) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id2) + self.pb.restore_node('node', node_restored, backup_id=id2) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata2, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.cleanup() - self.restore_node( - backup_dir, 
'node', node_restored, - data_dir=node_restored.data_dir, backup_id=id3) + self.pb.restore_node('node', node_restored, backup_id=id3) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.cleanup() @@ -1407,41 +1282,35 @@ def test_hidden_files(self): with old binary, then try to delete backup with new binary """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) node.slow_start() open(os.path.join(node.data_dir, ".hidden_stuff"), 'a').close() - backup_id = self.backup_node( - backup_dir, 'node',node, old_binary=True, options=['--stream']) + backup_id = self.pb.backup_node('node',node, old_binary=True, options=['--stream']) - self.delete_pb(backup_dir, 'node', backup_id) + self.pb.delete('node', backup_id) # @unittest.skip("skip") def test_compatibility_tablespace(self): """ https://github.com/postgrespro/pg_probackup/issues/348 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + backup_dir = self.backup_dir - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init() + self.pb.add_instance('node', node, old_binary=True) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", + backup_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"], old_binary=True) tblspace_old_path = self.get_tblspace_path(node, 'tblspace_old') @@ -1459,37 +1328,23 @@ def test_compatibility_tablespace(self): tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "-T", "{0}={1}".format( - tblspace_old_path, tblspace_new_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( + self.pb.restore_node('node', node_restored, + options=[ + "-j", "4", + "-T", "{0}={1}".format( + tblspace_old_path, tblspace_new_path)], + expect_error="because tablespace mapping is incorrect") + self.assertMessage(contains= 'ERROR: Backup {0} has no tablespaceses, ' - 'nothing to remap'.format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 'nothing to 
remap'.format(backup_id)) - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"], old_binary=True) - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -1501,3 +1356,44 @@ def test_compatibility_tablespace(self): if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_compatibility_master_options(self): + """ + Test correctness of handling of removed master-db, master-host, master-port, + master-user and replica-timeout options + """ + self.assertTrue( + self.version_to_num(self.old_probackup_version) <= self.version_to_num('2.6.0'), + 'You need pg_probackup old_binary =< 2.6.0 for this test') + + node = self.pg_node.make_simple('node') + backup_dir = self.backup_dir + + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) + + # add deprecated options (using probackup< 2.6) into pg_probackup.conf + # don't care about option values, we can use random values here + self.pb.set_config('node', + options=[ + '--master-db=postgres', + '--master-host=localhost', + '--master-port=5432', + '--master-user={0}'.format(self.username), + '--replica-timeout=100500'], + old_binary=True) + + # and try to show config with new binary (those options must be silently skipped) + self.pb.show_config('node', old_binary=False) + + # store config with new version (those options must disappear from config) + self.pb.set_config('node', + options=[], + old_binary=False) + + # and check absence + config_options = self.pb.show_config('node', old_binary=False) + self.assertFalse( + ['master-db', 'master-host', 'master-port', 'master-user', 'replica-timeout'] & config_options.keys(), + 'Obsolete options found in new config') \ No newline at end of file diff --git a/tests/compression_test.py b/tests/compression_test.py index 55924b9d2..74b044791 100644 --- a/tests/compression_test.py +++ b/tests/compression_test.py @@ -1,231 +1,41 @@ -import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess +from pg_probackup2.init_helpers import init_params +from .helpers.ptrack_helpers import ProbackupTest -class CompressionTest(ProbackupTest, unittest.TestCase): - # @unittest.skip("skip") - # @unittest.expectedFailure - def test_basic_compression_stream_zlib(self): - """ - make archive node, make full and page stream backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,256) i") - full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=[ - '--stream', - '--compress-algorithm=zlib']) - - # PAGE BACKUP - 
node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,512) i") - page_result = node.table_checksum("t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=[ - '--stream', '--compress-algorithm=zlib']) - - # DELTA BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(512,768) i") - delta_result = node.table_checksum("t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--compress-algorithm=zlib']) - - # Drop Node - node.cleanup() +def have_alg(alg): + return alg in init_params.probackup_compressions - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - full_result_new = node.table_checksum("t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() +class CompressionTest(ProbackupTest): - page_result_new = node.table_checksum("t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() + def test_basic_compression_stream_pglz(self): + self._test_compression_stream(compression = 'pglz') - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - delta_result_new = node.table_checksum("t_heap") - self.assertEqual(delta_result, delta_result_new) - - def test_compression_archive_zlib(self): - """ - make archive node, make full and page backups, - check data correctness in restored instance - """ - self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=["--compress-algorithm=zlib"]) - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(0,2) i") - 
page_result = node.table_checksum("t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=["--compress-algorithm=zlib"]) - - # DELTA BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,3) i") - delta_result = node.table_checksum("t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--compress-algorithm=zlib']) - - # Drop Node - node.cleanup() - - # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - full_result_new = node.table_checksum("t_heap") - self.assertEqual(full_result, full_result_new) - node.cleanup() - - # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() - - page_result_new = node.table_checksum("t_heap") - self.assertEqual(page_result, page_result_new) - node.cleanup() + def test_basic_compression_stream_zlib(self): + self._test_compression_stream(compression = 'zlib') - # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - node.slow_start() + @unittest.skipUnless(have_alg('lz4'), "pg_probackup is not compiled with lz4 support") + def test_basic_compression_stream_lz4(self): + self._test_compression_stream(compression = 'lz4') - delta_result_new = node.table_checksum("t_heap") - self.assertEqual(delta_result, delta_result_new) - node.cleanup() + @unittest.skipUnless(have_alg('zstd'), "pg_probackup is not compiled with zstd support") + def test_basic_compression_stream_zstd(self): + self._test_compression_stream(compression = 'zstd') - def test_compression_stream_pglz(self): + def _test_compression_stream(self, *, compression): """ make archive node, make full and page stream backups, check data correctness in restored instance """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -235,9 +45,8 @@ def test_compression_stream_pglz(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, 
backup_type='full', - options=['--stream', '--compress-algorithm=pglz']) + full_backup_id = self.pb.backup_node('node', node, backup_type='full', + options=['--stream', f'--compress-algorithm={compression}']) # PAGE BACKUP node.safe_psql( @@ -246,9 +55,8 @@ def test_compression_stream_pglz(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") page_result = node.table_checksum("t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--stream', '--compress-algorithm=pglz']) + page_backup_id = self.pb.backup_node('node', node, backup_type='page', + options=['--stream', f'--compress-algorithm={compression}']) # DELTA BACKUP node.safe_psql( @@ -257,23 +65,18 @@ def test_compression_stream_pglz(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") delta_result = node.table_checksum("t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--stream', '--compress-algorithm=pglz']) + delta_backup_id = self.pb.backup_node('node', node, backup_type='delta', + options=['--stream', f'--compress-algorithm={compression}']) # Drop Node node.cleanup() # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") @@ -281,15 +84,11 @@ def test_compression_stream_pglz(self): node.cleanup() # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=page_backup_id, + restore_result = self.pb.restore_node('node', node, backup_id=page_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(page_backup_id)) node.slow_start() page_result_new = node.table_checksum("t_heap") @@ -297,36 +96,44 @@ def test_compression_stream_pglz(self): node.cleanup() # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, + restore_result = self.pb.restore_node('node', node, backup_id=delta_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(delta_backup_id)) node.slow_start() delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() - def test_compression_archive_pglz(self): + def test_basic_compression_archive_pglz(self): + self._test_compression_archive(compression = 'pglz') + + def 
test_basic_compression_archive_zlib(self): + self._test_compression_archive(compression = 'zlib') + + @unittest.skipUnless(have_alg('lz4'), "pg_probackup is not compiled with lz4 support") + def test_basic_compression_archive_lz4(self): + self._test_compression_archive(compression = 'lz4') + + @unittest.skipUnless(have_alg('zstd'), "pg_probackup is not compiled with zstd support") + def test_basic_compression_archive_zstd(self): + self._test_compression_archive(compression = 'zstd') + + def _test_compression_archive(self, *, compression): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -336,9 +143,8 @@ def test_compression_archive_pglz(self): "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', - options=['--compress-algorithm=pglz']) + full_backup_id = self.pb.backup_node('node', node, backup_type='full', + options=[f'--compress-algorithm={compression}']) # PAGE BACKUP node.safe_psql( @@ -347,9 +153,8 @@ def test_compression_archive_pglz(self): "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") page_result = node.table_checksum("t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', - options=['--compress-algorithm=pglz']) + page_backup_id = self.pb.backup_node('node', node, backup_type='page', + options=[f'--compress-algorithm={compression}']) # DELTA BACKUP node.safe_psql( @@ -358,23 +163,18 @@ def test_compression_archive_pglz(self): "md5(i::text)::tsvector as tsvector " "from generate_series(200,300) i") delta_result = node.table_checksum("t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', - options=['--compress-algorithm=pglz']) + delta_backup_id = self.pb.backup_node('node', node, backup_type='delta', + options=[f'--compress-algorithm={compression}']) # Drop Node node.cleanup() # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=full_backup_id, - options=[ - "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, + options=[ + "-j", "4", "--immediate", + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") @@ -382,15 +182,11 @@ def test_compression_archive_pglz(self): node.cleanup() # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, 
backup_id=page_backup_id, + restore_result = self.pb.restore_node('node', node, backup_id=page_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(page_backup_id)) node.slow_start() page_result_new = node.table_checksum("t_heap") @@ -398,15 +194,11 @@ def test_compression_archive_pglz(self): node.cleanup() # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, backup_id=delta_backup_id, + restore_result = self.pb.restore_node('node', node, backup_id=delta_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(delta_backup_id)) node.slow_start() delta_result_new = node.table_checksum("t_heap") @@ -419,33 +211,19 @@ def test_compression_wrong_algorithm(self): check data correctness in restored instance """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=['--compress-algorithm=bla-blah']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because compress-algorithm is invalid.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertEqual( - e.message, - 'ERROR: Invalid compress algorithm value "bla-blah"\n', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + backup_type='full', options=['--compress-algorithm=bla-blah'], + expect_error="because compress-algorithm is invalid") + self.assertMessage(contains='ERROR: Invalid compress algorithm value "bla-blah"') # @unittest.skip("skip") def test_incompressible_pages(self): @@ -454,28 +232,24 @@ def test_incompressible_pages(self): take backup with compression, make sure that page was not compressed, restore backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Full - self.backup_node( - 
backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--compress-algorithm=zlib', '--compress-level=0']) node.pgbench_init(scale=3) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=[ '--compress-algorithm=zlib', @@ -485,7 +259,7 @@ def test_incompressible_pages(self): node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # Physical comparison if self.paranoia: @@ -493,3 +267,91 @@ def test_incompressible_pages(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() + + def test_compression_variant_algorithms_increment_chain(self): + """ + If an algorithm isn't supported, the corresponding backup is skipped. + 1. Full compressed [pglz, 5] backup -> change data + 2. Delta compressed [zlib, 3] -> change data + 3. Delta compressed [lz4, 9] -> change data + 4. Page compressed [zstd, 3] -> change data + Restore and compare + """ + + # Initialize instance and backup directory + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True) + total_backups = 0 + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do pglz compressed FULL backup + self.pb.backup_node("node", node, options=['--stream', + '--compress-level', '5', + '--compress-algorithm', 'pglz']) + # Check backup + show_backup = self.pb.show("node")[0] + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Do zlib compressed DELTA backup + if have_alg('zlib'): + total_backups += 1 + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + # Do backup + self.pb.backup_node("node", node, + backup_type="delta", options=['--compress-level', '3', + '--compress-algorithm', 'zlib']) + # Check backup + show_backup = self.pb.show("node")[total_backups] + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "DELTA") + + # Do lz4 compressed DELTA backup + if have_alg('lz4'): + total_backups += 1 + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + # Do backup + self.pb.backup_node("node", node, + backup_type="delta", options=['--compress-level', '9', + '--compress-algorithm', 'lz4']) + # Check backup + show_backup = self.pb.show("node")[total_backups] + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "DELTA") + + # Do zstd compressed PAGE backup + if have_alg('zstd'): + total_backups += 1 + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + # Do backup + self.pb.backup_node("node", node, + backup_type="page", options=['--compress-level', '3', + '--compress-algorithm', 'zstd']) + # Check backup + show_backup = self.pb.show("node")[total_backups] + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + pgdata = self.pgdata_content(node.data_dir) + + # Drop node and restore it + node.cleanup() + self.pb.restore_node('node', node=node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() diff --git a/tests/config_test.py b/tests/config_test.py index b1a0f9295..7989a4e04 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,113 +1,107 @@ import
unittest import subprocess import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class from sys import exit from shutil import copyfile -class ConfigTest(ProbackupTest, unittest.TestCase): +class ConfigTest(ProbackupTest): # @unittest.expectedFailure # @unittest.skip("skip") def test_remove_instance_config(self): """remove pg_probackup.conf""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.show_pb(backup_dir) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.show() + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - conf_file = os.path.join( - backup_dir, 'backups','node', 'pg_probackup.conf') + self.remove_backup_config(backup_dir, 'node') - os.unlink(os.path.join(backup_dir, 'backups','node', 'pg_probackup.conf')) - - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because pg_probackup.conf is missing. " - ".\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: could not open file "{0}": ' - 'No such file or directory'.format(conf_file), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + expect_error="because pg_probackup.conf is missing") + self.assertMessage(regex=r'ERROR: Reading instance control.*No such file') # @unittest.expectedFailure # @unittest.skip("skip") def test_corrupt_backup_content(self): """corrupt backup_content.control""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - full1_id = self.backup_node(backup_dir, 'node', node) + full1_id = self.pb.backup_node('node', node) node.safe_psql( 'postgres', 'create table t1()') - fulle2_id = self.backup_node(backup_dir, 'node', node) + fulle2_id = self.pb.backup_node('node', node) + + content = self.read_backup_file(backup_dir, 'node', fulle2_id, + 'backup_content.control') + self.write_backup_file(backup_dir, 'node', full1_id, + 'backup_content.control', content) - fulle1_conf_file = os.path.join( - backup_dir, 'backups','node', full1_id, 'backup_content.control') + self.pb.validate('node', + expect_error="because backup_content.control is corrupted") + self.assertMessage(regex="WARNING: Invalid CRC of backup control file " + fr".*{full1_id}") +
self.assertMessage(contains=f"WARNING: Failed to get file list for backup {full1_id}") + self.assertMessage(contains=f"WARNING: Backup {full1_id} file list is corrupted") - fulle2_conf_file = os.path.join( - backup_dir, 'backups','node', fulle2_id, 'backup_content.control') + self.pb.show('node', full1_id)['status'] - copyfile(fulle2_conf_file, fulle1_conf_file) + self.assertEqual(self.pb.show('node')[0]['status'], 'CORRUPT') + self.assertEqual(self.pb.show('node')[1]['status'], 'OK') + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_basic_dry_run_set_config(self): + """Check set-config command witch dry-run option""" + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.show() + self.pb.set_archiving('node', node) + node.slow_start() + + conf_file = os.path.join(backup_dir, 'backups', 'node', 'pg_probackup.conf') + with open(conf_file) as cf: + cf_before = cf.read() + self.pb.set_config('node', options=['--dry-run']) + with open(conf_file) as cf: + cf_after = cf.read() + # Compare content of conf_file after dry-run + self.assertTrue(cf_before==cf_after) + + #Check access suit - if disk mounted as read_only + dir_path = os.path.join(backup_dir, 'backups', 'node') + dir_mode = os.stat(dir_path).st_mode + os.chmod(dir_path, 0o500) + + error_message = self.pb.set_config('node', options=['--dry-run'], expect_error ='because of changed permissions') try: - self.validate_pb(backup_dir, 'node') - self.assertEqual( - 1, 0, - "Expecting Error because pg_probackup.conf is missing. " - ".\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Invalid CRC of backup control file '{0}':".format(fulle1_conf_file), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WARNING: Failed to get file list for backup {0}".format(full1_id), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - "WARNING: Backup {0} file list is corrupted".format(full1_id), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.show_pb(backup_dir, 'node', full1_id)['status'] - - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], 'CORRUPT') - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], 'OK') + self.assertMessage(error_message, contains='ERROR: Check permissions ') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + + node.cleanup() diff --git a/tests/delete_test.py b/tests/delete_test.py index 10100887d..761aa36f3 100644 --- a/tests/delete_test.py +++ b/tests/delete_test.py @@ -1,48 +1,53 @@ -import unittest -import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest import subprocess -class DeleteTest(ProbackupTest, unittest.TestCase): +class DeleteTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure def test_delete_full_backups(self): """delete full backups""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 
'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # full backup - self.backup_node(backup_dir, 'node', node) + id_1 = self.pb.backup_node('node', node) + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + + id_2 = self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node) + id_2_1 = self.pb.backup_node('node', node, backup_type = "delta") pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node) + id_3 = self.pb.backup_node('node', node) - show_backups = self.show_pb(backup_dir, 'node') - id_1 = show_backups[0]['id'] - id_2 = show_backups[1]['id'] - id_3 = show_backups[2]['id'] - self.delete_pb(backup_dir, 'node', id_2) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') + self.assertEqual(show_backups[0]['id'], id_1) + self.assertEqual(show_backups[1]['id'], id_2) + self.assertEqual(show_backups[2]['id'], id_2_1) + self.assertEqual(show_backups[3]['id'], id_3) + + self.pb.delete('node', id_2) + show_backups = self.pb.show('node') + self.assertEqual(len(show_backups), 2) self.assertEqual(show_backups[0]['id'], id_1) self.assertEqual(show_backups[1]['id'], id_3) @@ -50,63 +55,55 @@ def test_delete_full_backups(self): # @unittest.expectedFailure def test_del_instance_archive(self): """delete full backups""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # full backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # full backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # restore node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() # Delete instance - self.del_instance(backup_dir, 'node') + self.pb.del_instance('node') # @unittest.skip("skip") # @unittest.expectedFailure def test_delete_archive_mix_compress_and_non_compressed_segments(self): """delete full backups""" - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving( - backup_dir, 'node', node, compress=False) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node, compress=False) node.slow_start() # full backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=10) # Restart archiving with compression - self.set_archiving(backup_dir, 'node', node, compress=True) + self.pb.set_archiving('node', node, compress=True) node.restart() # 
full backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--retention-redundancy=3', '--delete-expired']) @@ -114,8 +111,7 @@ def test_delete_archive_mix_compress_and_non_compressed_segments(self): pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--retention-redundancy=3', '--delete-expired']) @@ -123,41 +119,37 @@ def test_delete_archive_mix_compress_and_non_compressed_segments(self): pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--retention-redundancy=3', '--delete-expired']) # @unittest.skip("skip") - def test_delete_increment_page(self): + def test_basic_delete_increment_page(self): """delete increment and all after him""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # full backup mode - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # page backup mode - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") # page backup mode - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") # full backup mode - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 4) # delete first page backup - self.delete_pb(backup_dir, 'node', show_backups[1]['id']) + self.pb.delete('node', show_backups[1]['id']) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 2) self.assertEqual(show_backups[0]['backup-mode'], "FULL") @@ -171,15 +163,12 @@ def test_delete_increment_ptrack(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + ptrack_enable=self.ptrack) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -187,21 +176,21 @@ def test_delete_increment_ptrack(self): 'CREATE EXTENSION ptrack') # full backup mode - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # ptrack backup mode - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + self.pb.backup_node('node', node, backup_type="ptrack") # ptrack backup mode - 
self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + self.pb.backup_node('node', node, backup_type="ptrack") # full backup mode - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 4) # delete first page backup - self.delete_pb(backup_dir, 'node', show_backups[1]['id']) + self.pb.delete('node', show_backups[1]['id']) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 2) self.assertEqual(show_backups[0]['backup-mode'], "FULL") @@ -210,71 +199,68 @@ def test_delete_increment_ptrack(self): self.assertEqual(show_backups[1]['status'], "OK") # @unittest.skip("skip") - def test_delete_orphaned_wal_segments(self): + def test_basic_delete_orphaned_wal_segments(self): """ make archive node, make three full backups, delete second backup without --wal option, then delete orphaned wals via --wal option """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i") # first full backup - backup_1_id = self.backup_node(backup_dir, 'node', node) + backup_1_id = self.pb.backup_node('node', node) # second full backup - backup_2_id = self.backup_node(backup_dir, 'node', node) + backup_2_id = self.pb.backup_node('node', node) # third full backup - backup_3_id = self.backup_node(backup_dir, 'node', node) + backup_3_id = self.pb.backup_node('node', node) node.stop() # Check wals - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] + wals = self.get_instance_wal_list(backup_dir, 'node') original_wal_quantity = len(wals) # delete second full backup - self.delete_pb(backup_dir, 'node', backup_2_id) + self.pb.delete('node', backup_2_id) # check wal quantity - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK") - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + self.pb.validate() + self.assertEqual(self.pb.show('node', backup_1_id)['status'], "OK") + self.assertEqual(self.pb.show('node', backup_3_id)['status'], "OK") # try to delete wals for second backup - self.delete_pb(backup_dir, 'node', options=['--wal']) + self.pb.delete('node', options=['--wal']) # check wal quantity - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK") - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + self.pb.validate() + self.assertEqual(self.pb.show('node', backup_1_id)['status'], "OK") + self.assertEqual(self.pb.show('node', backup_3_id)['status'], "OK") # delete first full backup - self.delete_pb(backup_dir, 'node', backup_1_id) - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', 
backup_3_id)['status'], "OK") + self.pb.delete('node', backup_1_id) + self.pb.validate() + self.assertEqual(self.pb.show('node', backup_3_id)['status'], "OK") - result = self.delete_pb(backup_dir, 'node', options=['--wal']) + result = self.pb.delete('node', options=['--wal']) # delete useless wals self.assertTrue('On timeline 1 WAL segments between ' in result and 'will be removed' in result) - self.validate_pb(backup_dir) - self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK") + self.pb.validate() + self.assertEqual(self.pb.show('node', backup_3_id)['status'], "OK") # Check quantity, it should be lower than original - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] - self.assertTrue(original_wal_quantity > len(wals), "Number of wals not changed after 'delete --wal' which is illegal") + wals = self.get_instance_wal_list(backup_dir, 'node') + self.assertGreater(original_wal_quantity, len(wals), "Number of wals not changed after 'delete --wal' which is illegal") # Delete last backup - self.delete_pb(backup_dir, 'node', backup_3_id, options=['--wal']) - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] + self.pb.delete('node', backup_3_id, options=['--wal']) + wals = self.get_instance_wal_list(backup_dir, 'node') self.assertEqual (0, len(wals), "Number of wals should be equal to 0") # @unittest.skip("skip") @@ -287,45 +273,41 @@ def test_delete_wal_between_multiple_timelines(self): [A1, B1) are deleted and backups B1 and A2 keep their WAL """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - A1 = self.backup_node(backup_dir, 'node', node) + A1 = self.pb.backup_node('node', node) # load some data to node node.pgbench_init(scale=3) - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() - self.restore_node(backup_dir, 'node', node2) - self.set_auto_conf(node2, {'port': node2.port}) + self.pb.restore_node('node', node=node2) + node2.set_auto_conf({'port': node2.port}) node2.slow_start() # load some more data to node node.pgbench_init(scale=3) # take A2 - A2 = self.backup_node(backup_dir, 'node', node) + A2 = self.pb.backup_node('node', node) # load some more data to node2 node2.pgbench_init(scale=2) - B1 = self.backup_node( - backup_dir, 'node', + B1 = self.pb.backup_node('node', node2, data_dir=node2.data_dir) - self.delete_pb(backup_dir, 'node', backup_id=A1, options=['--wal']) + self.pb.delete('node', backup_id=A1, options=['--wal']) - self.validate_pb(backup_dir) + self.pb.validate() # @unittest.skip("skip") def test_delete_backup_with_empty_control_file(self): @@ -333,53 +315,43 @@ def test_delete_backup_with_empty_control_file(self): take backup, truncate its control file, try to delete it via 'delete' command """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # full backup mode - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # page backup mode - self.backup_node( - backup_dir, 'node', node, backup_type="delta", options=['--stream']) + self.pb.backup_node('node', node, backup_type="delta", options=['--stream']) # page backup mode - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", options=['--stream']) + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=['--stream']) - with open( - os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.control'), - 'wt') as f: - f.flush() - f.close() + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data = '' - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 3) - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # @unittest.skip("skip") def test_delete_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) + backup_id_b = self.pb.backup_node('node', node) # Change FULLb to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') @@ -388,8 +360,7 @@ def test_delete_interleaved_incremental_chains(self): # FULLa OK # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # PAGEa1 OK # FULLb ERROR @@ -405,8 +376,7 @@ def test_delete_interleaved_incremental_chains(self): # FULLb OK # FULLa OK - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -426,8 +396,7 @@ def test_delete_interleaved_incremental_chains(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # PAGEa2 OK # PAGEb1 ERROR @@ -449,8 +418,7 @@ def test_delete_interleaved_incremental_chains(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # Change PAGEa2 and FULLa status to OK self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') @@ -463,8 +431,8 @@ def test_delete_interleaved_incremental_chains(self): # FULLb OK # FULLa OK - self.backup_node(backup_dir, 'node', node) - 
self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') # PAGEc1 OK # FULLc OK @@ -476,17 +444,15 @@ def test_delete_interleaved_incremental_chains(self): # FULLa OK # Delete FULLb - self.delete_pb( - backup_dir, 'node', backup_id_b) + self.pb.delete('node', backup_id_b) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 5) + self.assertEqual(len(self.pb.show('node')), 5) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) # @unittest.skip("skip") def test_delete_multiple_descendants(self): - """ + r""" PAGEb3 | PAGEa3 PAGEb2 / @@ -496,25 +462,22 @@ def test_delete_multiple_descendants(self): FULLb | FULLa should be deleted """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) + backup_id_b = self.pb.backup_node('node', node) # Change FULLb to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # Change FULLb to OK self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') @@ -526,8 +489,7 @@ def test_delete_multiple_descendants(self): # FULLb OK # FULLa OK - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -546,8 +508,7 @@ def test_delete_multiple_descendants(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # PAGEa2 OK # PAGEb1 ERROR @@ -569,8 +530,7 @@ def test_delete_multiple_descendants(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # PAGEb2 OK # PAGEa2 ERROR @@ -594,8 +554,7 @@ def test_delete_multiple_descendants(self): # FULLb ERROR # FULLa OK - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a3 = self.pb.backup_node('node', node, backup_type='page') # PAGEa3 OK # PAGEb2 ERROR @@ -612,8 +571,7 @@ def test_delete_multiple_descendants(self): self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK') self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b3 = self.pb.backup_node('node', node, backup_type='page') # PAGEb3 OK # PAGEa3 ERROR @@ -640,21 +598,21 @@ def test_delete_multiple_descendants(self): # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 self.assertEqual( - self.show_pb(backup_dir, 'node', 
backup_id=page_id_a3)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a3)['parent-backup-id'], page_id_a1) self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a2)['parent-backup-id'], page_id_a1) # Delete FULLa - self.delete_pb(backup_dir, 'node', backup_id_a) + self.pb.delete('node', backup_id_a) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) # @unittest.skip("skip") def test_delete_multiple_descendants_dry_run(self): - """ + r""" PAGEa3 PAGEa2 / \ / @@ -662,29 +620,25 @@ def test_delete_multiple_descendants_dry_run(self): | FULLa """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUP node.pgbench_init(scale=1) - backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # Change PAGEa2 to ERROR @@ -692,15 +646,13 @@ def test_delete_multiple_descendants_dry_run(self): pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a3 = self.pb.backup_node('node', node, backup_type='page') # Change PAGEa2 to ERROR self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') # Delete PAGEa1 - output = self.delete_pb( - backup_dir, 'node', page_id_a1, + output = self.pb.delete('node', page_id_a1, options=['--dry-run', '--log-level-console=LOG', '--delete-wal']) print(output) @@ -719,15 +671,13 @@ def test_delete_multiple_descendants_dry_run(self): 'delete of backup {0} :'.format(page_id_a1), output) - self.assertIn( - 'On timeline 1 WAL segments between 000000010000000000000001 ' - 'and 000000010000000000000003 can be removed', - output) + self.assertRegex(output, + r'On timeline 1 WAL segments between 000000010000000000000001 ' + r'and 00000001000000000000000\d can be removed') - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) - output = self.delete_pb( - backup_dir, 'node', page_id_a1, + output = self.pb.delete('node', page_id_a1, options=['--log-level-console=LOG', '--delete-wal']) self.assertIn( @@ -744,60 +694,57 @@ def test_delete_multiple_descendants_dry_run(self): 'delete of backup {0} :'.format(page_id_a1), output) - self.assertIn( - 'On timeline 1 WAL segments between 000000010000000000000001 ' - 'and 000000010000000000000003 will be removed', - output) + self.assertRegex(output, + r'On timeline 1 WAL segments between 000000010000000000000001 ' + r'and 00000001000000000000000\d will be removed') - 
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) + self.assertEqual(len(self.pb.show('node')), 1) - self.validate_pb(backup_dir, 'node') + self.pb.validate('node') def test_delete_error_backups(self): """delete increment and all after him""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # full backup mode - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # page backup mode - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") # Take FULL BACKUP - backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) # Take PAGE BACKUP - backup_id_b = self.backup_node(backup_dir, 'node', node, backup_type="page") + backup_id_b = self.pb.backup_node('node', node, backup_type="page") - backup_id_c = self.backup_node(backup_dir, 'node', node, backup_type="page") + backup_id_c = self.pb.backup_node('node', node, backup_type="page") - backup_id_d = self.backup_node(backup_dir, 'node', node, backup_type="page") + backup_id_d = self.pb.backup_node('node', node, backup_type="page") # full backup mode - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") - backup_id_e = self.backup_node(backup_dir, 'node', node, backup_type="page") - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") + backup_id_e = self.pb.backup_node('node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") # Change status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR') self.change_backup_status(backup_dir, 'node', backup_id_c, 'ERROR') self.change_backup_status(backup_dir, 'node', backup_id_e, 'ERROR') - print(self.show_pb(backup_dir, as_text=True, as_json=False)) + print(self.pb.show(as_text=True, as_json=False)) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 10) # delete error backups - output = self.delete_pb(backup_dir, 'node', options=['--status=ERROR', '--dry-run']) - show_backups = self.show_pb(backup_dir, 'node') + output = self.pb.delete('node', options=['--status=ERROR', '--dry-run']) + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 10) self.assertIn( @@ -808,15 +755,46 @@ def test_delete_error_backups(self): "INFO: Backup {0} with status OK can be deleted".format(backup_id_d), output) - print(self.show_pb(backup_dir, as_text=True, as_json=False)) + print(self.pb.show(as_text=True, as_json=False)) - show_backups = self.show_pb(backup_dir, 'node') - output = self.delete_pb(backup_dir, 'node', options=['--status=ERROR']) + show_backups = self.pb.show('node') + output = self.pb.delete('node', options=['--status=ERROR']) print(output) - show_backups = self.show_pb(backup_dir, 'node') + show_backups = self.pb.show('node') self.assertEqual(len(show_backups), 4) 
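# The hunks above apply one mechanical mapping throughout the suite: the
# explicit backup_dir argument moves into the self.pb wrapper and node-side
# settings move onto the node object. A rough sketch of the correspondence,
# inferred from these diffs rather than from the framework itself:
#
#     self.init_pb(backup_dir)                     ->  self.pb.init()
#     self.add_instance(backup_dir, 'node', node)  ->  self.pb.add_instance('node', node)
#     self.backup_node(backup_dir, 'node', node)   ->  self.pb.backup_node('node', node)
#     self.show_pb(backup_dir, 'node')             ->  self.pb.show('node')
#     self.delete_pb(backup_dir, 'node', some_id)  ->  self.pb.delete('node', some_id)
#     self.validate_pb(backup_dir)                 ->  self.pb.validate()
#     self.set_auto_conf(node, {'port': p})        ->  node.set_auto_conf({'port': p})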
self.assertEqual(show_backups[0]['status'], "OK") self.assertEqual(show_backups[1]['status'], "OK") self.assertEqual(show_backups[2]['status'], "OK") self.assertEqual(show_backups[3]['status'], "OK") + +########################################################################### +# dry-run +########################################################################### + + def test_basic_dry_run_del_instance(self): + """ Check del-instance command with dry-run option""" + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # full backup + self.pb.backup_node('node', node) + # restore + node.cleanup() + self.pb.restore_node('node', node=node) + node.slow_start() + + content_before = self.pgdata_content(self.backup_dir) + # Delete instance + self.pb.del_instance('node', options=['--dry-run']) + + self.compare_instance_dir( + content_before, + self.pgdata_content(self.backup_dir) + ) + + node.cleanup() \ No newline at end of file diff --git a/tests/delta_test.py b/tests/delta_test.py index 8736a079c..6bf7f9d9a 100644 --- a/tests/delta_test.py +++ b/tests/delta_test.py @@ -1,14 +1,11 @@ import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -from testgres import QueryException -import subprocess -import time + +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb from threading import Thread -class DeltaTest(ProbackupTest, unittest.TestCase): +class DeltaTest(ProbackupTest): # @unittest.skip("skip") def test_basic_delta_vacuum_truncate(self): @@ -16,20 +13,17 @@ def test_basic_delta_vacuum_truncate(self): make node, create table, take full backup, delete last 3 pages, vacuum relation, take delta backup, take second delta backup, - restore latest delta backup and check data correctness + restore the latest delta backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node_restored.cleanup() node.slow_start() @@ -45,7 +39,7 @@ def test_basic_delta_vacuum_truncate(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( "postgres", @@ -55,22 +49,19 @@ def test_basic_delta_vacuum_truncate(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) # Physical comparison pgdata_restored = 
self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -81,19 +72,16 @@ def test_delta_vacuum_truncate_1(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], ) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + node_restored = self.pg_node.make_simple('node_restored', ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node_restored.cleanup() node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -112,7 +100,7 @@ def test_delta_vacuum_truncate_1(self): "vacuum t_heap" ) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -124,12 +112,10 @@ def test_delta_vacuum_truncate_1(self): "vacuum t_heap" ) - self.backup_node( - backup_dir, 'node', node, backup_type='delta' + self.pb.backup_node('node', node, backup_type='delta' ) - self.backup_node( - backup_dir, 'node', node, backup_type='delta' + self.pb.backup_node('node', node, backup_type='delta' ) pgdata = self.pgdata_content(node.data_dir) @@ -137,8 +123,7 @@ def test_delta_vacuum_truncate_1(self): old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, + self.pb.restore_node( 'node', node_restored, options=[ @@ -150,7 +135,7 @@ def test_delta_vacuum_truncate_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -161,19 +146,16 @@ def test_delta_vacuum_truncate_2(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], ) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + node_restored = self.pg_node.make_simple('node_restored', ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node_restored.cleanup() node.slow_start() @@ -188,27 +170,24 @@ def test_delta_vacuum_truncate_2(self): "select pg_relation_filepath('t_heap')" ).decode('utf-8').rstrip() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) 
print(os.path.join(node.data_dir, filepath + '.1')) os.unlink(os.path.join(node.data_dir, filepath + '.1')) - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -217,19 +196,17 @@ def test_delta_stream(self): make archive node, take full and delta stream backups, restore them and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s' } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -240,8 +217,7 @@ def test_delta_stream(self): "from generate_series(0,100) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, + full_backup_id = self.pb.backup_node('node', node, backup_type='full', options=['--stream']) # delta BACKUP @@ -251,40 +227,32 @@ def test_delta_stream(self): "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") delta_result = node.table_checksum("t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, + delta_backup_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) # Drop Node node.cleanup() # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() # Check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=delta_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(delta_backup_id)) + node.slow_start() delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, 
delta_result_new) @@ -297,15 +265,13 @@ def test_delta_archive(self): restore them and check data correctness """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -314,8 +280,7 @@ def test_delta_archive(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full') + full_backup_id = self.pb.backup_node('node', node, backup_type='full') # delta BACKUP node.safe_psql( @@ -323,39 +288,30 @@ def test_delta_archive(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,2) i") delta_result = node.table_checksum("t_heap") - delta_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + delta_backup_id = self.pb.backup_node('node', node, backup_type='delta') # Drop Node node.cleanup() # Restore and check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() # Restore and check delta backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(delta_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=delta_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(delta_backup_id)) node.slow_start() delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) @@ -367,11 +323,9 @@ def test_delta_multiple_segments(self): Make node, create table with multiple segments, write some data to it, check delta and data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'fsync': 'off', 'shared_buffers': '1GB', @@ -380,9 +334,9 @@ def test_delta_multiple_segments(self): } ) - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - # self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + # self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -392,7 +346,7 @@ def test_delta_multiple_segments(self): scale=100, options=['--tablespace=somedata', '--no-vacuum']) # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # PGBENCH STUFF pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum']) @@ -402,22 +356,19 @@ def test_delta_multiple_segments(self): # GET LOGICAL CONTENT FROM NODE result = node.table_checksum("pgbench_accounts") # delta BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) # GET PHYSICAL CONTENT FROM NODE pgdata = self.pgdata_content(node.data_dir) # RESTORE NODE - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) + restored_node = self.pg_node.make_simple('restored_node') restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') - self.restore_node( - backup_dir, 'node', restored_node, + self.pb.restore_node('node', restored_node, options=[ "-j", "4", "-T", "{0}={1}".format( tblspc_path, tblspc_path_new)]) @@ -426,7 +377,7 @@ def test_delta_multiple_segments(self): pgdata_restored = self.pgdata_content(restored_node.data_dir) # START RESTORED NODE - self.set_auto_conf(restored_node, {'port': restored_node.port}) + restored_node.set_auto_conf({'port': restored_node.port}) restored_node.slow_start() result_new = restored_node.table_checksum("pgbench_accounts") @@ -438,29 +389,26 @@ def test_delta_multiple_segments(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_delta_vacuum_full(self): """ make node, make full and delta stream backups, restore them and check data correctness """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( "postgres", @@ -479,27 +427,24 @@ def test_delta_vacuum_full(self): target=pg_connect.execute, args=["VACUUM FULL t_heap"]) process.start() - while not gdb.stopped_in_breakpoint: - time.sleep(1) + gdb.stopped_in_breakpoint() gdb.continue_execution_until_break(20) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) gdb.remove_all_breakpoints() - gdb._execute('detach') + gdb.detach() 
process.join() old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4", "-T", "{0}={1}".format( old_tablespace, new_tablespace)]) @@ -508,7 +453,7 @@ def test_delta_vacuum_full(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() @@ -518,18 +463,16 @@ def test_create_db(self): Make node, take full backup, create database db1, take delta backup, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '10GB', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL BACKUP @@ -539,8 +482,7 @@ def test_create_db(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") node.table_checksum("t_heap") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=["--stream"]) # CREATE DATABASE DB1 @@ -551,8 +493,7 @@ def test_create_db(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") # DELTA BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -561,13 +502,10 @@ def test_create_db(self): pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, + self.pb.restore_node( 'node', node_restored, backup_id=backup_id, @@ -582,15 +520,14 @@ def test_create_db(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # DROP DATABASE DB1 node.safe_psql( "postgres", "drop database db1") # SECOND DELTA BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -599,8 +536,7 @@ def test_create_db(self): # RESTORE SECOND DELTA BACKUP node_restored.cleanup() - self.restore_node( - backup_dir, + self.pb.restore_node( 'node', node_restored, backup_id=backup_id, @@ -616,22 +552,11 @@ def test_create_db(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() - try: - node_restored.safe_psql('db1', 'select 1') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are connecting to deleted database" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except 
QueryException as e: - self.assertTrue( - 'FATAL: database "db1" does not exist' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = node_restored.safe_psql('db1', 'select 1', expect_error=True) + self.assertMessage(error_result, contains='FATAL: database "db1" does not exist') # @unittest.skip("skip") def test_exists_in_previous_backup(self): @@ -639,20 +564,18 @@ def test_exists_in_previous_backup(self): Make node, take full backup, create table, take page backup, take delta backup, check that file is no fully copied to delta backup """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '10GB', 'checkpoint_timeout': '5min', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -665,23 +588,20 @@ def test_exists_in_previous_backup(self): filepath = node.safe_psql( "postgres", "SELECT pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - self.backup_node( - backup_dir, + self.pb.backup_node( 'node', node, options=["--stream"]) # PAGE BACKUP - backup_id = self.backup_node( - backup_dir, + backup_id = self.pb.backup_node( 'node', node, backup_type='page' ) - fullpath = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'database', filepath) - self.assertFalse(os.path.exists(fullpath)) + self.assertFalse(self.backup_file_exists(backup_dir, 'node', backup_id, + f'database/{filepath}')) # if self.paranoia: # pgdata_page = self.pgdata_content( @@ -690,8 +610,7 @@ def test_exists_in_previous_backup(self): # 'node', backup_id, 'database')) # DELTA BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -703,21 +622,18 @@ def test_exists_in_previous_backup(self): # self.compare_pgdata( # pgdata_page, pgdata_delta) - fullpath = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'database', filepath) - self.assertFalse(os.path.exists(fullpath)) + self.assertFalse(self.backup_file_exists(backup_dir, 'node', backup_id, + f'database/{filepath}')) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + node_restored = self.pg_node.make_simple('node_restored' ) node_restored.cleanup() - self.restore_node( - backup_dir, + self.pb.restore_node( 'node', node_restored, backup_id=backup_id, @@ -732,7 +648,7 @@ def test_exists_in_previous_backup(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -741,17 +657,15 @@ def test_alter_table_set_tablespace_delta(self): Make node, create tablespace with table, take full backup, alter tablespace location, take delta backup, restore database. 
""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', + set_replication=True, pg_options={ 'checkpoint_timeout': '30s', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL BACKUP @@ -763,7 +677,7 @@ def test_alter_table_set_tablespace_delta(self): " from generate_series(0,100) i") # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) # ALTER TABLESPACE self.create_tblspace_in_node(node, 'somedata_new') @@ -773,8 +687,7 @@ def test_alter_table_set_tablespace_delta(self): # DELTA BACKUP result = node.table_checksum("t_heap") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["--stream"]) @@ -782,12 +695,10 @@ def test_alter_table_set_tablespace_delta(self): pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -807,7 +718,7 @@ def test_alter_table_set_tablespace_delta(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() result_new = node_restored.table_checksum("t_heap") @@ -821,20 +732,18 @@ def test_alter_database_set_tablespace_delta(self): take delta backup, alter database tablespace location, take delta backup restore last delta backup. 
""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) # CREATE DATABASE DB1 node.safe_psql( @@ -846,8 +755,7 @@ def test_alter_database_set_tablespace_delta(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -860,8 +768,7 @@ def test_alter_database_set_tablespace_delta(self): ) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -870,13 +777,11 @@ def test_alter_database_set_tablespace_delta(self): pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + node_restored = self.pg_node.make_simple('node_restored' ) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -896,7 +801,7 @@ def test_alter_database_set_tablespace_delta(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -905,24 +810,22 @@ def test_delta_delete(self): Make node, create tablespace with table, take full backup, alter tablespace location, take delta backup, restore database. 
""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', + set_replication=True, pg_options={ 'checkpoint_timeout': '30s', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) node.safe_psql( "postgres", @@ -942,8 +845,7 @@ def test_delta_delete(self): ) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -952,13 +854,10 @@ def test_delta_delete(self): pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') - ) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -974,7 +873,7 @@ def test_delta_delete(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() def test_delta_nullified_heap_page_backup(self): @@ -982,14 +881,12 @@ def test_delta_nullified_heap_page_backup(self): make node, take full backup, nullify some heap block, take delta backup, restore, physically compare pgdata`s """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=1) @@ -1002,8 +899,7 @@ def test_delta_nullified_heap_page_backup(self): "postgres", "CHECKPOINT") - self.backup_node( - backup_dir, 'node', node) + self.pb.backup_node('node', node) # Nullify some block in PostgreSQL file = os.path.join(node.data_dir, file_path).replace("\\", "/") @@ -1014,20 +910,18 @@ def test_delta_nullified_heap_page_backup(self): f.seek(8192) f.write(b"\x00"*8192) f.flush() - f.close - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["--log-level-file=verbose"]) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) + content = self.read_pb_log() + self.assertIn( + 'VERBOSE: File: {0} blknum 1, empty zeroed page'.format(file_path), + content) if not self.remote: - log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log") - with open(log_file_path) as f: - content = f.read() - self.assertIn( 'VERBOSE: File: "{0}" blknum 1, empty page'.format(file), content) @@ -1036,12 +930,10 @@ def test_delta_nullified_heap_page_backup(self): content) # 
Restore DELTA backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -1052,66 +944,49 @@ def test_delta_backup_from_past(self): make node, take FULL stream backup, take DELTA stream backup, restore FULL backup, try to take second DELTA stream backup """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + instance_name = 'node' + node = self.pg_node.make_simple(instance_name, + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance(instance_name, node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node(instance_name, node, options=['--stream']) node.pgbench_init(scale=3) # First DELTA - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node(instance_name, node, backup_type='delta', options=['--stream']) # Restore FULL backup node.cleanup() - self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + self.pb.restore_node(instance_name, node, backup_id=backup_id) node.slow_start() # Second DELTA backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are backing up an instance from the past" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Current START LSN ' in e.message and - 'is lower than START LSN ' in e.message and - 'of previous backup ' in e.message and - 'It may indicate that we are trying ' - 'to backup PostgreSQL instance from the past' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") + error_result = self.pb.backup_node( instance_name, node, + backup_type='delta', + options=['--stream'], + expect_error=True) + + self.assertMessage(error_result, regex=r'Current START LSN (\d+)/(\d+) is lower than START LSN (\d+)/(\d+) ' + r'of previous backup \w{6}. 
It may indicate that we are trying ' + r'to backup PostgreSQL instance from the past.') + # @unittest.expectedFailure def test_delta_pg_resetxlog(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - 'shared_buffers': '512MB', - 'max_wal_size': '3GB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + instance_name = 'node' + node = self.pg_node.make_simple(instance_name, + set_replication=True, + pg_options={'shared_buffers': '512MB', + 'max_wal_size': '3GB'}) + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance(instance_name, node) node.slow_start() # Create table @@ -1121,11 +996,9 @@ def test_delta_pg_resetxlog(self): "create table t_heap " "as select nextval('t_seq')::int as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " -# "from generate_series(0,25600) i") "from generate_series(0,2560) i") - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node(instance_name, node, options=['--stream']) node.safe_psql( 'postgres', @@ -1140,12 +1013,10 @@ def test_delta_pg_resetxlog(self): # now smack it with sledgehammer if node.major_version >= 10: pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' else: pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - self.run_binary( + self.pb.run_binary( [ pg_resetxlog_path, '-D', @@ -1161,37 +1032,23 @@ def test_delta_pg_resetxlog(self): print("Die! Die! Why won't you die?... Why won't you die?") exit(1) - # take ptrack backup -# self.backup_node( -# backup_dir, 'node', node, -# backup_type='delta', options=['--stream']) - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance was brutalized by pg_resetxlog" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertIn( - 'Insert error message', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) - -# pgdata = self.pgdata_content(node.data_dir) -# -# node_restored = self.make_simple_node( -# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) -# node_restored.cleanup() -# -# self.restore_node( -# backup_dir, 'node', node_restored) -# -# pgdata_restored = self.pgdata_content(node_restored.data_dir) -# self.compare_pgdata(pgdata, pgdata_restored) + backup_id = self.pb.backup_node(instance_name, + node, + backup_type='delta', + options=['--stream']) + self.pb.validate(instance_name, backup_id) + + def test_delta_backup_before_full_will_fail(self): + instance_name = 'node' + node = self.pg_node.make_simple( + base_dir=instance_name) + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) + node.slow_start() + + error_result = self.pb.backup_node(instance_name, node, backup_type="delta", expect_error=True) + self.assertMessage(error_result, + contains='ERROR: could not open file "pg_wal/00000001.history": No such file or directory') diff --git a/tests/exclude_test.py b/tests/exclude_test.py index cb3530cd5..1d6485ce8 100644 --- a/tests/exclude_test.py +++ 
b/tests/exclude_test.py @@ -1,25 +1,23 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest -class ExcludeTest(ProbackupTest, unittest.TestCase): +class ExcludeTest(ProbackupTest): # @unittest.skip("skip") def test_exclude_temp_files(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'logging_collector': 'on', 'log_filename': 'postgresql.log'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() oid = node.safe_psql( @@ -36,16 +34,13 @@ def test_exclude_temp_files(self): f.flush() f.close - full_id = self.backup_node( - backup_dir, 'node', node, backup_type='full', options=['--stream']) - - file = os.path.join( - backup_dir, 'backups', 'node', full_id, - 'database', 'base', 'pgsql_tmp', 'pgsql_tmp7351.16') + full_id = self.pb.backup_node('node', node, backup_type='full', options=['--stream']) self.assertFalse( - os.path.exists(file), - "File must be excluded: {0}".format(file)) + self.backup_file_exists(backup_dir, 'node', full_id, + 'database/base/pgsql_tmp/pgsql_tmp7351.16'), + "File must be excluded: database/base/pgsql_tmp/pgsql_tmp7351.16" + ) # TODO check temporary tablespaces @@ -56,14 +51,12 @@ def test_exclude_temp_tables(self): make node without archiving, create temp table, take full backup, check that temp table not present in backup catalogue """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() with node.connect("postgres") as conn: @@ -112,8 +105,7 @@ def test_exclude_temp_tables(self): temp_toast_filename = os.path.basename(toast_path) temp_idx_toast_filename = os.path.basename(toast_idx_path) - self.backup_node( - backup_dir, 'node', node, backup_type='full', options=['--stream']) + self.pb.backup_node('node', node, backup_type='full', options=['--stream']) for root, dirs, files in os.walk(backup_dir): for file in files: @@ -138,16 +130,14 @@ def test_exclude_unlogged_tables_1(self): alter table to unlogged, take delta backup, restore delta backup, check that PGDATA`s are physically the same """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ "shared_buffers": "10MB"}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() conn = node.connect() @@ -161,25 +151,21 @@ def test_exclude_unlogged_tables_1(self): conn.execute("create index test_idx on test (generate_series)") conn.commit() - self.backup_node( - backup_dir, 
'node', node, + self.pb.backup_node('node', node, backup_type='full', options=['--stream']) node.safe_psql('postgres', "alter table test set logged") - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -193,17 +179,15 @@ def test_exclude_unlogged_tables_2(self): 2. restore FULL, DELTA, PAGE to empty db, ensure unlogged table exist and is epmty """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ "shared_buffers": "10MB"}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() backup_ids = [] @@ -223,14 +207,12 @@ def test_exclude_unlogged_tables_2(self): 'postgres', "select pg_relation_filepath('test')")[0][0] - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type=backup_type, options=['--stream']) backup_ids.append(backup_id) - filelist = self.get_backup_filelist( - backup_dir, 'node', backup_id) + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) self.assertNotIn( rel_path, filelist, @@ -253,7 +235,7 @@ def test_exclude_unlogged_tables_2(self): node.stop() node.cleanup() - self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + self.pb.restore_node('node', node=node, backup_id=backup_id) node.slow_start() @@ -268,21 +250,18 @@ def test_exclude_log_dir(self): """ check that by default 'log' and 'pg_log' directories are not backed up """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'logging_collector': 'on', 'log_filename': 'postgresql.log'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='full', options=['--stream']) log_dir = node.safe_psql( @@ -291,8 +270,7 @@ def test_exclude_log_dir(self): node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node, options=["-j", "4"]) # check that PGDATA/log or PGDATA/pg_log do not exists path = os.path.join(node.data_dir, log_dir) @@ -305,31 +283,27 @@ def test_exclude_log_dir_1(self): """ check that "--backup-pg-log" works correctly """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = 
self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'logging_collector': 'on', 'log_filename': 'postgresql.log'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() log_dir = node.safe_psql( 'postgres', 'show log_directory').decode('utf-8').rstrip() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='full', options=['--stream', '--backup-pg-log']) node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node, options=["-j", "4"]) # check that PGDATA/log or PGDATA/pg_log do not exists path = os.path.join(node.data_dir, log_dir) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 49f79607f..31990287b 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -5,9 +5,13 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir + [--s3=s3-interface-provider] + [--skip-if-exists] + [--dry-run] + [--help] - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] @@ -30,18 +34,22 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ssh-options] [--restore-command=cmdline] [--archive-host=destination] [--archive-port=port] [--archive-user=username] + [--write-rate-limit=baudrate] + [--dry-run] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--dry-run] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] + [--no-scale-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -71,9 +79,13 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--write-rate-limit=baudrate] + [--s3=s3-interface-provider] + [--cfs-nondatafile-mode] + [--dry-run] [--help] - pg_probackup restore -B backup-path --instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -98,29 +110,35 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--ssh-options] [--archive-host=hostname] [--archive-port=port] [--archive-user=username] + [--s3=s3-interface-provider] + [--dry-run] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] [--recovery-target-timeline=timeline] [--recovery-target-name=target-name] [--skip-block-validation] + [--s3=s3-interface-provider] + [--wal] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] - [--no-color] [--help] + [--no-color] [--show-symlinks] + [--s3=s3-interface-provider] + [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -128,26 +146,35 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [-i backup-id | --delete-expired | --merge-expired | --status=backup_status] [--delete-wal] [--dry-run] [--no-validate] [--no-sync] + [--s3=s3-interface-provider] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id [--progress] [-j num-threads] [--no-validate] [--no-sync] + [--dry-run] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--s3=s3-interface-provider] + [--skip-if-exists] + [--dry-run] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name + [--s3=s3-interface-provider] + [--dry-run] [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -159,9 +186,10 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] @@ -169,6 +197,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] pg_probackup catchup -b catchup-mode @@ -178,6 +207,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[-j num-threads] [-T OLDDIR=NEWDIR] [--exclude-path=path_prefix] + [-X WALDIR | --waldir=WALDIR] [-d dbname] [-h host] [-p port] [-U username] [-w --no-password] [-W --password] [--remote-proto] [--remote-host] @@ -186,5 +216,31 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--dry-run] [--help] + pg_probackup S3 Enviroment variables +PG_PROBACKUP_S3_HOST Host name of the S3 server +PG_PROBACKUP_S3_PORT Port of the S3 server +PG_PROBACKUP_S3_REGION Region of the S3 server +PG_PROBACKUP_S3_BUCKET_NAME Bucket on the S3 server +PG_PROBACKUP_S3_ACCESS_KEY, +PG_PROBACKUP_S3_SECRET_ACCESS_KEY Secure tokens on the S3 server +PG_PROBACKUP_S3_HTTPS S3 HTTP protocol + Set to ON or HTTPS for HTTPS or to any other + value for HTTP. + Default values: + HTTP if --s3=minio is specified + HTTPS otherwise +PG_PROBACKUP_S3_BUFFER_SIZE Size of the read/write buffer (in MiB) for + communicating with S3 (default: 16) +PG_PROBACKUP_S3_RETRIES Maximum number of attempts to execute an S3 + request in case of failures (default: 5) +PG_PROBACKUP_S3_TIMEOUT Maximum allowable amount of time (in seconds) + for transferring PG_PROBACKUP_S3_BUFFER_SIZE + of data to/from S3 (default: 300) +PG_PROBACKUP_S3_IGNORE_CERT_VER Don't verify the certificate host and peer +PG_PROBACKUP_S3_CA_CERTIFICATE Trust to the path to Certificate Authority (CA) bundle +PG_PROBACKUP_S3_CA_PATH Trust to the directory holding CA certificates +PG_PROBACKUP_S3_CLIENT_CERT Setup SSL client certificate +PG_PROBACKUP_S3_CLIENT_KEY Setup private key file for TLS and SSL client certificate + Read the website for details . Report bugs to . diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 976932b9d..c152fe0b8 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -5,9 +5,13 @@ pg_probackup - утилита для управления резервным к pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir + [--s3=s3-interface-provider] + [--skip-if-exists] + [--dry-run] + [--help] - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] @@ -30,18 +34,22 @@ pg_probackup - утилита для управления резервным к [--ssh-options] [--restore-command=cmdline] [--archive-host=destination] [--archive-port=port] [--archive-user=username] + [--write-rate-limit=baudrate] + [--dry-run] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--dry-run] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] + [--no-scale-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -71,9 +79,13 @@ pg_probackup - утилита для управления резервным к [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--write-rate-limit=baudrate] + [--s3=s3-interface-provider] + [--cfs-nondatafile-mode] + [--dry-run] [--help] - pg_probackup restore -B backup-path 
--instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -98,29 +110,35 @@ pg_probackup - утилита для управления резервным к [--ssh-options] [--archive-host=hostname] [--archive-port=port] [--archive-user=username] + [--s3=s3-interface-provider] + [--dry-run] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] [--recovery-target-timeline=timeline] [--recovery-target-name=target-name] [--skip-block-validation] + [--s3=s3-interface-provider] + [--wal] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] - [--no-color] [--help] + [--no-color] [--show-symlinks] + [--s3=s3-interface-provider] + [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -128,26 +146,35 @@ pg_probackup - утилита для управления резервным к [-i backup-id | --delete-expired | --merge-expired | --status=backup_status] [--delete-wal] [--dry-run] [--no-validate] [--no-sync] + [--s3=s3-interface-provider] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id [--progress] [-j num-threads] [--no-validate] [--no-sync] + [--dry-run] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--s3=s3-interface-provider] + [--skip-if-exists] + [--dry-run] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name + [--s3=s3-interface-provider] + [--dry-run] [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -159,9 +186,10 @@ pg_probackup - утилита для управления резервным к [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] @@ -169,6 +197,7 @@ pg_probackup - утилита для управления 
резервным к [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] pg_probackup catchup -b catchup-mode @@ -178,6 +207,7 @@ pg_probackup - утилита для управления резервным к [-j num-threads] [-T OLDDIR=NEWDIR] [--exclude-path=path_prefix] + [-X WALDIR | --waldir=WALDIR] [-d dbname] [-h host] [-p port] [-U username] [-w --no-password] [-W --password] [--remote-proto] [--remote-host] @@ -186,5 +216,31 @@ pg_probackup - утилита для управления резервным к [--dry-run] [--help] + pg_probackup S3 Enviroment variables +PG_PROBACKUP_S3_HOST Host name of the S3 server +PG_PROBACKUP_S3_PORT Port of the S3 server +PG_PROBACKUP_S3_REGION Region of the S3 server +PG_PROBACKUP_S3_BUCKET_NAME Bucket on the S3 server +PG_PROBACKUP_S3_ACCESS_KEY, +PG_PROBACKUP_S3_SECRET_ACCESS_KEY Secure tokens on the S3 server +PG_PROBACKUP_S3_HTTPS S3 HTTP protocol + Set to ON or HTTPS for HTTPS or to any other + value for HTTP. + Default values: + HTTP if --s3=minio is specified + HTTPS otherwise +PG_PROBACKUP_S3_BUFFER_SIZE Size of the read/write buffer (in MiB) for + communicating with S3 (default: 16) +PG_PROBACKUP_S3_RETRIES Maximum number of attempts to execute an S3 + request in case of failures (default: 5) +PG_PROBACKUP_S3_TIMEOUT Maximum allowable amount of time (in seconds) + for transferring PG_PROBACKUP_S3_BUFFER_SIZE + of data to/from S3 (default: 300) +PG_PROBACKUP_S3_IGNORE_CERT_VER Don't verify the certificate host and peer +PG_PROBACKUP_S3_CA_CERTIFICATE Trust to the path to Certificate Authority (CA) bundle +PG_PROBACKUP_S3_CA_PATH Trust to the directory holding CA certificates +PG_PROBACKUP_S3_CLIENT_CERT Setup SSL client certificate +PG_PROBACKUP_S3_CLIENT_KEY Setup private key file for TLS and SSL client certificate + Подробнее читайте на сайте . Сообщайте об ошибках в . 
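The PG_PROBACKUP_S3_* variables documented in the expected help output above are ordinary environment variables, so a run against an S3-compatible endpoint only needs them exported before pg_probackup (or the test suite) is started. A minimal sketch, assuming a local MinIO endpoint and placeholder credentials; none of these concrete values come from the patch:

import os

# Placeholder values for an assumed local MinIO endpoint; adjust to your setup.
os.environ.update({
    "PG_PROBACKUP_S3_HOST": "127.0.0.1",
    "PG_PROBACKUP_S3_PORT": "9000",
    "PG_PROBACKUP_S3_REGION": "us-east-1",
    "PG_PROBACKUP_S3_BUCKET_NAME": "pg-probackup-test",
    "PG_PROBACKUP_S3_ACCESS_KEY": "minioadmin",
    "PG_PROBACKUP_S3_SECRET_ACCESS_KEY": "minioadmin",
    # Per the help text, HTTP is the default when --s3=minio is used; set ON or HTTPS to force TLS.
    "PG_PROBACKUP_S3_HTTPS": "OFF",
})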
diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out deleted file mode 100644 index 0d50cb268..000000000 --- a/tests/expected/option_version.out +++ /dev/null @@ -1 +0,0 @@ -pg_probackup 2.5.12 diff --git a/tests/external_test.py b/tests/external_test.py index 53f3c5449..dd6aa1b24 100644 --- a/tests/external_test.py +++ b/tests/external_test.py @@ -1,13 +1,12 @@ import unittest import os from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.cfs_helpers import find_by_name +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class import shutil # TODO: add some ptrack tests -class ExternalTest(ProbackupTest, unittest.TestCase): +class ExternalTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure @@ -17,60 +16,41 @@ def test_basic_external(self): with external directory, restore backup, check that external directory was successfully copied """ - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + backup_dir = self.backup_dir external_dir = self.get_tblspace_path(node, 'somedirectory') # create directory in external_directory - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL backup with external directory pointing to a file - file_path = os.path.join(core_dir, 'file') + file_path = os.path.join(self.test_path, 'file') with open(file_path, "w+") as f: pass - try: - self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=[ - '--external-dirs={0}'.format(file_path)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir point to a file" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: --external-dirs option' in e.message and - 'directory or symbolic link expected' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type="full", + options=[ + '--external-dirs={0}'.format(file_path)], + expect_error="because external dir point to a file") + self.assertMessage(contains='ERROR: --external-dirs option') + self.assertMessage(contains='directory or symbolic link expected') sleep(1) # FULL backup - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) # Fill external directories - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir, options=["-j", "4"]) # Full backup with external dir - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--external-dirs={0}'.format(external_dir)]) @@ -80,8 +60,7 @@ def test_basic_external(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, options=["-j", 
"4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -97,39 +76,32 @@ def test_external_none(self): restore delta backup, check that external directory was not copied """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + backup_dir = self.backup_dir external_dir = self.get_tblspace_path(node, 'somedirectory') # create directory in external_directory - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) # Fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir, options=["-j", "4"]) # Full backup with external dir - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--stream', '--external-dirs={0}'.format(external_dir)]) # Delta backup without external directory - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=['--external-dirs=none', '--stream']) shutil.rmtree(external_dir, ignore_errors=True) @@ -139,8 +111,7 @@ def test_external_none(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -154,47 +125,34 @@ def test_external_dirs_overlapping(self): take backup with two external directories pointing to the same directory, backup should fail """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + backup_dir = self.backup_dir external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # create directory in external_directory - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() os.mkdir(external_dir1) os.mkdir(external_dir2) # Full backup with external dirs - try: - self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", "{0}{1}{2}{1}{0}".format( - external_dir1, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir2, - self.EXTERNAL_DIRECTORY_DELIMITER, - external_dir1)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: External directory path (-E option)' in e.message and - 'contain another external directory' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - 
repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}{1}{2}{1}{0}".format( + external_dir1, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir2, + self.EXTERNAL_DIRECTORY_DELIMITER, + external_dir1)], + expect_error="because tablespace mapping is incorrect") + self.assertMessage(regex=r'ERROR: External directory path \(-E option\) ".*" ' + r'contain another external directory') # @unittest.skip("skip") def test_external_dir_mapping(self): @@ -204,64 +162,42 @@ def test_external_dir_mapping(self): check that restore with external-dir mapping will end with success """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # Fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=[ - "-j", "4", - "--external-mapping={0}={1}".format( - external_dir1, external_dir1_new), - "--external-mapping={0}={1}".format( - external_dir2, external_dir2_new)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because tablespace mapping is incorrect" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: --external-mapping option' in e.message and - 'have an entry in list of external directories' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.restore_node('node', node=node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2, external_dir2_new)], + expect_error="because tablespace mapping is incorrect") + self.assertMessage(contains=r"ERROR: --external-mapping option's old directory " + r"doesn't have an entry in list of external directories") + + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -272,8 +208,7 @@ def test_external_dir_mapping(self): pgdata = self.pgdata_content( node.base_dir, 
exclude_dirs=['logs']) - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node=node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format( @@ -289,40 +224,30 @@ def test_external_dir_mapping(self): # @unittest.expectedFailure def test_backup_multiple_external(self): """check that cmdline has priority over config""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # FULL backup - self.backup_node( - backup_dir, 'node', node, backup_type="full", + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['-E', external_dir1]) # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", external_dir2]) @@ -334,8 +259,7 @@ def test_backup_multiple_external(self): shutil.rmtree(external_dir1, ignore_errors=True) shutil.rmtree(external_dir2, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=["-j", "4"]) pgdata_restored = self.pgdata_content( @@ -354,64 +278,50 @@ def test_external_backward_compatibility(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) + self.pb.init(old_binary=True) + self.pb.show() - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) + self.pb.add_instance('node', node, old_binary=True) + self.pb.show() node.slow_start() node.pgbench_init(scale=3) # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, old_binary=True, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, 
options=["-j", "4"]) + self.pb.restore_node('node', external_dir2, options=["-j", "4"]) pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) pgbench.wait() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, old_binary=True, options=["-j", "4", "--stream"]) # fill external directories with changed data shutil.rmtree(external_dir1, ignore_errors=True) shutil.rmtree(external_dir2, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # delta backup with external directories using new binary - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -423,16 +333,14 @@ def test_external_backward_compatibility(self): node.base_dir, exclude_dirs=['logs']) # RESTORE chain with new binary - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), @@ -444,7 +352,6 @@ def test_external_backward_compatibility(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_external_backward_compatibility_merge_1(self): """ take backup with old binary without external dirs support @@ -454,52 +361,42 @@ def test_external_backward_compatibility_merge_1(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) + self.pb.init(old_binary=True) + self.pb.show() - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) + self.pb.add_instance('node', node, old_binary=True) + self.pb.show() node.slow_start() node.pgbench_init(scale=3) # tmp FULL backup with old binary - tmp_id = self.backup_node( - backup_dir, 'node', node, + tmp_id = self.pb.backup_node('node', node, old_binary=True, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) 
+ self.pb.restore_node('node', external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, old_binary=True, options=["-j", "4", "--stream"]) pgbench = node.pgbench(options=['-T', '30', '-c', '1']) pgbench.wait() # delta backup with external directories using new binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -511,19 +408,17 @@ def test_external_backward_compatibility_merge_1(self): node.base_dir, exclude_dirs=['logs']) # Merge chain chain with new binary - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # Restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), @@ -535,7 +430,6 @@ def test_external_backward_compatibility_merge_1(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_external_backward_compatibility_merge_2(self): """ take backup with old binary without external dirs support @@ -545,52 +439,42 @@ def test_external_backward_compatibility_merge_2(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.show_pb(backup_dir) + self.pb.init(old_binary=True) + self.pb.show() - self.add_instance(backup_dir, 'node', node, old_binary=True) - self.show_pb(backup_dir) + self.pb.add_instance('node', node, old_binary=True) + self.pb.show() node.slow_start() node.pgbench_init(scale=3) # tmp FULL backup with old binary - tmp_id = self.backup_node( - backup_dir, 'node', node, + tmp_id = self.pb.backup_node('node', node, old_binary=True, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, 
old_binary=True, options=["-j", "4", "--stream"]) pgbench = node.pgbench(options=['-T', '30', '-c', '1']) pgbench.wait() # delta backup with external directories using new binary - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", @@ -606,19 +490,14 @@ def test_external_backward_compatibility_merge_2(self): shutil.rmtree(external_dir1, ignore_errors=True) shutil.rmtree(external_dir2, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, + self.pb.restore_node('node', external_dir1, options=['-j', '4', '--skip-external-dirs']) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, + self.pb.restore_node('node', external_dir2, options=['-j', '4', '--skip-external-dirs']) # delta backup without external directories using old binary - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", @@ -631,11 +510,10 @@ def test_external_backward_compatibility_merge_2(self): node.base_dir, exclude_dirs=['logs']) # Merge chain using new binary - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # Restore merged backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() @@ -644,8 +522,7 @@ def test_external_backward_compatibility_merge_2(self): external_dir2_new = self.get_tblspace_path( node_restored, 'external_dir2') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format( @@ -659,45 +536,45 @@ def test_external_backward_compatibility_merge_2(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_external_merge(self): """""" if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init() + self.pb.add_instance('node', node, old_binary=True) node.slow_start() node.pgbench_init(scale=3) # take temp FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + tmp_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') + self.create_tblspace_in_node(node, 'tblsp_1') + node.safe_psql( + "postgres", + "create table t_heap_lame tablespace tblsp_1 " + "as select 1 as id, md5(i::text) as text, " + "md5(i::text)::tsvector as tsvector " + "from generate_series(0,100) i") + # fill external directories with data - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', external_dir1, backup_id=tmp_id, + options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - 
data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', external_dir2, backup_id=tmp_id, + options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # FULL backup with old binary without external dirs support - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, old_binary=True, options=["-j", "4", "--stream"]) # change data a bit @@ -705,8 +582,7 @@ def test_external_merge(self): pgbench.wait() # delta backup with external directories using new binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -717,10 +593,10 @@ def test_external_merge(self): pgdata = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) - print(self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) # Merge - print(self.merge_backup(backup_dir, 'node', backup_id=backup_id, + print(self.pb.merge_backup('node', backup_id=backup_id, options=['--log-level-file=VERBOSE'])) # RESTORE @@ -730,8 +606,7 @@ def test_external_merge(self): external_dir1_new = self.get_tblspace_path(node, 'external_dir1') external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ "-j", "4", "--external-mapping={0}={1}".format( @@ -748,43 +623,36 @@ def test_external_merge(self): # @unittest.skip("skip") def test_external_merge_skip_external_dirs(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=3) # FULL backup with old data - tmp_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + tmp_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # fill external directories with old data - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, backup_id=tmp_id, + options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, backup_id=tmp_id, + options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # change data a bit pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) pgbench.wait() # FULL backup with external directories - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -797,19 +665,14 @@ def test_external_merge_skip_external_dirs(self): shutil.rmtree(external_dir2, ignore_errors=True) # fill external directories with new data - self.restore_node( - backup_dir, 'node', node, - 
data_dir=external_dir1, + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4", "--skip-external-dirs"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4", "--skip-external-dirs"]) # DELTA backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -821,16 +684,14 @@ def test_external_merge_skip_external_dirs(self): node.base_dir, exclude_dirs=['logs']) # merge backups without external directories - self.merge_backup( - backup_dir, 'node', + self.pb.merge_backup('node', backup_id=backup_id, options=['--skip-external-dirs']) # RESTORE node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=["-j", "4"]) pgdata_restored = self.pgdata_content( @@ -839,24 +700,20 @@ def test_external_merge_skip_external_dirs(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_external_merge_1(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=3) # FULL backup - self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') @@ -865,24 +722,18 @@ def test_external_merge_1(self): pgbench.wait() # FULL backup with changed data - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill external directories with changed data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # delta backup with external directories using new binary - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -890,7 +741,7 @@ def test_external_merge_1(self): self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) pgdata = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -902,8 +753,7 @@ def test_external_merge_1(self): external_dir1_new = self.get_tblspace_path(node, 'external_dir1') external_dir2_new = self.get_tblspace_path(node, 'external_dir2') 
- self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=[ "-j", "4", "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), @@ -918,21 +768,19 @@ def test_external_merge_1(self): # @unittest.skip("skip") def test_external_merge_3(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) # FULL backup - self.backup_node(backup_dir, 'node', node, options=["-j", "4"]) + self.pb.backup_node('node', node, options=["-j", "4"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') @@ -941,23 +789,17 @@ def test_external_merge_3(self): pgbench.wait() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # fill external directories with changed data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1) + self.pb.restore_node('node', restore_dir=external_dir1) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2) + self.pb.restore_node('node', restore_dir=external_dir2) - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # page backup with external directories - self.backup_node( - backup_dir, 'node', node, backup_type="page", + self.pb.backup_node('node', node, backup_type="page", options=[ "-j", "4", "-E", "{0}{1}{2}".format( @@ -966,8 +808,7 @@ def test_external_merge_3(self): external_dir2)]) # page backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page", + backup_id = self.pb.backup_node('node', node, backup_type="page", options=[ "-j", "4", "-E", "{0}{1}{2}".format( @@ -978,8 +819,7 @@ def test_external_merge_3(self): pgdata = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) - self.merge_backup( - backup_dir, 'node', backup_id=backup_id, + self.pb.merge_backup('node', backup_id=backup_id, options=['--log-level-file=verbose']) # RESTORE @@ -989,8 +829,7 @@ def test_external_merge_3(self): external_dir1_new = self.get_tblspace_path(node, 'external_dir1') external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ "-j", "4", "--external-mapping={0}={1}".format( @@ -1004,24 +843,20 @@ def test_external_merge_3(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_external_merge_2(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', 
node) node.slow_start() node.pgbench_init(scale=3) # FULL backup - self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') @@ -1030,24 +865,18 @@ def test_external_merge_2(self): pgbench.wait() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill external directories with changed data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # delta backup with external directories - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1056,8 +885,7 @@ def test_external_merge_2(self): external_dir2)]) # delta backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1072,7 +900,7 @@ def test_external_merge_2(self): shutil.rmtree(external_dir2, ignore_errors=True) # delta backup without external directories - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # RESTORE node.cleanup() @@ -1081,8 +909,7 @@ def test_external_merge_2(self): external_dir1_new = self.get_tblspace_path(node, 'external_dir1') external_dir2_new = self.get_tblspace_path(node, 'external_dir2') - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=[ "-j", "4", "--external-mapping={0}={1}".format(external_dir1, external_dir1_new), @@ -1097,14 +924,12 @@ def test_external_merge_2(self): # @unittest.skip("skip") def test_restore_external_changed_data(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -1114,28 +939,22 @@ def test_restore_external_changed_data(self): external_dir2 = self.get_tblspace_path(node, 'external_dir2') # FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', + tmp_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, 
options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # change data a bit pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) pgbench.wait() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1147,14 +966,10 @@ def test_restore_external_changed_data(self): shutil.rmtree(external_dir1, ignore_errors=True) shutil.rmtree(external_dir2, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, backup_id=backup_id, + self.pb.restore_node('node', restore_dir=external_dir1, backup_id=backup_id, options=["-j", "4", "--skip-external-dirs"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, backup_id=backup_id, + self.pb.restore_node('node', restore_dir=external_dir2, backup_id=backup_id, options=["-j", "4", "--skip-external-dirs"]) # change data a bit more @@ -1162,8 +977,7 @@ def test_restore_external_changed_data(self): pgbench.wait() # Delta backup with external directories - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1178,8 +992,7 @@ def test_restore_external_changed_data(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4"]) pgdata_restored = self.pgdata_content( @@ -1191,16 +1004,14 @@ def test_restore_external_changed_data(self): # @unittest.skip("skip") def test_restore_external_changed_data_1(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '32MB'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=1) @@ -1210,28 +1021,22 @@ def test_restore_external_changed_data_1(self): external_dir2 = self.get_tblspace_path(node, 'external_dir2') # FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', + tmp_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # change data a bit pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum']) pgbench.wait() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1243,14 +1048,10 @@ def test_restore_external_changed_data_1(self): shutil.rmtree(external_dir1, ignore_errors=True) shutil.rmtree(external_dir2, ignore_errors=True) - self.restore_node( - backup_dir, 
'node', node, - data_dir=external_dir1, backup_id=backup_id, + self.pb.restore_node('node', restore_dir=external_dir1, backup_id=backup_id, options=["-j", "4", "--skip-external-dirs"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, backup_id=backup_id, + self.pb.restore_node('node', restore_dir=external_dir2, backup_id=backup_id, options=["-j", "4", "--skip-external-dirs"]) # change data a bit more @@ -1258,8 +1059,7 @@ def test_restore_external_changed_data_1(self): pgbench.wait() # Delta backup with only one external directory - self.backup_node( - backup_dir, 'node', node, backup_type="delta", + self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", external_dir1]) @@ -1268,17 +1068,15 @@ def test_restore_external_changed_data_1(self): node.base_dir, exclude_dirs=['logs', 'external_dir2']) # Restore - node.cleanup() - shutil.rmtree(node._base_dir) + node.stop() + shutil.rmtree(node.base_dir) # create empty file in external_dir2 - os.mkdir(node._base_dir) - os.mkdir(external_dir2) + os.makedirs(external_dir2) with open(os.path.join(external_dir2, 'file'), 'w+') as f: f.close() - output = self.restore_node( - backup_dir, 'node', node, + output = self.pb.restore_node('node', node=node, options=["-j", "4"]) self.assertNotIn( @@ -1291,19 +1089,16 @@ def test_restore_external_changed_data_1(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.expectedFailure - # @unittest.skip("skip") def test_merge_external_changed_data(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '32MB'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -1313,28 +1108,22 @@ def test_merge_external_changed_data(self): external_dir2 = self.get_tblspace_path(node, 'external_dir2') # FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', + tmp_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # change data a bit pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) pgbench.wait() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1346,14 +1135,10 @@ def test_merge_external_changed_data(self): shutil.rmtree(external_dir1, ignore_errors=True) shutil.rmtree(external_dir2, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, backup_id=backup_id, + self.pb.restore_node('node', restore_dir=external_dir1, backup_id=backup_id, options=["-j", "4", "--skip-external-dirs"]) - self.restore_node( - backup_dir, 'node', node, - 
data_dir=external_dir2, backup_id=backup_id, + self.pb.restore_node('node', restore_dir=external_dir2, backup_id=backup_id, options=["-j", "4", "--skip-external-dirs"]) # change data a bit more @@ -1361,8 +1146,7 @@ def test_merge_external_changed_data(self): pgbench.wait() # Delta backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta", + backup_id = self.pb.backup_node('node', node, backup_type="delta", options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1374,14 +1158,13 @@ def test_merge_external_changed_data(self): node.base_dir, exclude_dirs=['logs']) # Merge - self.merge_backup(backup_dir, 'node', backup_id) + self.pb.merge_backup('node', backup_id) # Restore node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=["-j", "4"]) pgdata_restored = self.pgdata_content( @@ -1395,37 +1178,29 @@ def test_restore_skip_external(self): """ Check that --skip-external-dirs works correctly """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # FULL backup with external directories - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -1446,8 +1221,7 @@ def test_restore_skip_external(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=[ "-j", "4", "--skip-external-dirs"]) @@ -1467,41 +1241,32 @@ def test_external_dir_is_symlink(self): if os.name == 'nt': self.skipTest('Skipped for Windows') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = 
self.get_tblspace_path(node, 'external_dir') # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - symlinked_dir = os.path.join(core_dir, 'symlinked') + symlinked_dir = os.path.join(self.test_path, 'symlinked') - self.restore_node( - backup_dir, 'node', node, - data_dir=symlinked_dir, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=symlinked_dir, options=["-j", "4"]) # drop temp FULL backup - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # create symlink to directory in external directory os.symlink(symlinked_dir, external_dir) # FULL backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -1509,8 +1274,7 @@ def test_external_dir_is_symlink(self): pgdata = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') # RESTORE node_restored.cleanup() @@ -1518,8 +1282,7 @@ def test_external_dir_is_symlink(self): external_dir_new = self.get_tblspace_path( node_restored, 'external_dir') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node=node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format( external_dir, external_dir_new)]) @@ -1531,8 +1294,7 @@ def test_external_dir_is_symlink(self): self.assertEqual( external_dir, - self.show_pb( - backup_dir, 'node', + self.pb.show('node', backup_id=backup_id)['external-dirs']) # @unittest.expectedFailure @@ -1546,43 +1308,34 @@ def test_external_dir_contain_symlink_on_dir(self): if os.name == 'nt': self.skipTest('Skipped for Windows') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') dir_in_external_dir = os.path.join(external_dir, 'dir') # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - symlinked_dir = os.path.join(core_dir, 'symlinked') + symlinked_dir = os.path.join(self.test_path, 'symlinked') - self.restore_node( - backup_dir, 'node', node, - data_dir=symlinked_dir, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=symlinked_dir, options=["-j", "4"]) # drop temp FULL backup - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # create symlink to directory in external 
directory os.mkdir(external_dir) os.symlink(symlinked_dir, dir_in_external_dir) # FULL backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node=node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -1590,8 +1343,7 @@ def test_external_dir_contain_symlink_on_dir(self): pgdata = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') # RESTORE node_restored.cleanup() @@ -1599,8 +1351,7 @@ def test_external_dir_contain_symlink_on_dir(self): external_dir_new = self.get_tblspace_path( node_restored, 'external_dir') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node=node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format( external_dir, external_dir_new)]) @@ -1612,8 +1363,7 @@ def test_external_dir_contain_symlink_on_dir(self): self.assertEqual( external_dir, - self.show_pb( - backup_dir, 'node', + self.pb.show('node', backup_id=backup_id)['external-dirs']) # @unittest.expectedFailure @@ -1627,35 +1377,27 @@ def test_external_dir_contain_symlink_on_file(self): if os.name == 'nt': self.skipTest('Skipped for Windows') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') file_in_external_dir = os.path.join(external_dir, 'file') # temp FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=["-j", "4", "--stream"]) + backup_id = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - symlinked_dir = os.path.join(core_dir, 'symlinked') + symlinked_dir = os.path.join(self.test_path, 'symlinked') - self.restore_node( - backup_dir, 'node', node, - data_dir=symlinked_dir, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=symlinked_dir, options=["-j", "4"]) # drop temp FULL backup - self.delete_pb(backup_dir, 'node', backup_id=backup_id) + self.pb.delete('node', backup_id=backup_id) # create symlink to directory in external directory src_file = os.path.join(symlinked_dir, 'postgresql.conf') @@ -1664,8 +1406,7 @@ def test_external_dir_contain_symlink_on_file(self): os.symlink(src_file, file_in_external_dir) # FULL backup with external directories - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -1673,8 +1414,7 @@ def test_external_dir_contain_symlink_on_file(self): pgdata = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') # RESTORE 
node_restored.cleanup() @@ -1682,8 +1422,7 @@ def test_external_dir_contain_symlink_on_file(self): external_dir_new = self.get_tblspace_path( node_restored, 'external_dir') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node=node_restored, options=[ "-j", "4", "--external-mapping={0}={1}".format( external_dir, external_dir_new)]) @@ -1695,8 +1434,7 @@ def test_external_dir_contain_symlink_on_file(self): self.assertEqual( external_dir, - self.show_pb( - backup_dir, 'node', + self.pb.show('node', backup_id=backup_id)['external-dirs']) # @unittest.expectedFailure @@ -1706,16 +1444,12 @@ def test_external_dir_is_tablespace(self): Check that backup fails with error if external directory points to tablespace """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') @@ -1726,24 +1460,10 @@ def test_external_dir_is_tablespace(self): node.pgbench_init(scale=1, tablespace='tblspace1') # FULL backup with external directories - try: - backup_id = self.backup_node( - backup_dir, 'node', node, - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir points to the tablespace" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'External directory path (-E option)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=["-j", "4", "--stream", "-E", external_dir], + expect_error="because external dir points to the tablespace") + self.assertMessage(contains='External directory path (-E option)') def test_restore_external_dir_not_empty(self): """ @@ -1751,16 +1471,12 @@ def test_restore_external_dir_not_empty(self): if external directory point to not empty tablespace and if remapped directory also isn`t empty """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') @@ -1772,28 +1488,17 @@ def test_restore_external_dir_not_empty(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) - node.cleanup() + node.stop() + shutil.rmtree(node.data_dir) - 
try: - self.restore_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir is not empty" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'External directory is not empty', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + expect_error="because external dir is not empty") + self.assertMessage(contains='External directory is not empty') external_dir_new = self.get_tblspace_path(node, 'external_dir_new') @@ -1803,23 +1508,11 @@ def test_restore_external_dir_not_empty(self): with open(os.path.join(external_dir_new, 'file1'), 'w+') as f: f.close() - try: - self.restore_node( - backup_dir, 'node', node, - options=['--external-mapping={0}={1}'.format( - external_dir, external_dir_new)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because remapped external dir is not empty" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'External directory is not empty', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + options=[f'--external-mapping', + f'{external_dir}={external_dir_new}'], + expect_error="because remapped external dir is not empty") + self.assertMessage(contains='External directory is not empty') def test_restore_external_dir_is_missing(self): """ @@ -1828,16 +1521,12 @@ def test_restore_external_dir_is_missing(self): take DELTA backup with external directory, which should fail """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') @@ -1849,8 +1538,7 @@ def test_restore_external_dir_is_missing(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -1858,31 +1546,15 @@ def test_restore_external_dir_is_missing(self): # drop external directory shutil.rmtree(external_dir, ignore_errors=True) - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: External directory is not found:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='delta', + options=["-j", "4", "--stream", "-E", external_dir], + 
expect_error="because external dir is missing") + self.assertMessage(contains='ERROR: External directory is not found:') sleep(1) # take DELTA without external directories - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["-j", "4", "--stream"]) @@ -1893,7 +1565,7 @@ def test_restore_external_dir_is_missing(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -1909,16 +1581,12 @@ def test_merge_external_dir_is_missing(self): merge it into FULL, restore and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') @@ -1930,8 +1598,7 @@ def test_merge_external_dir_is_missing(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -1939,31 +1606,15 @@ def test_merge_external_dir_is_missing(self): # drop external directory shutil.rmtree(external_dir, ignore_errors=True) - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', - options=[ - "-j", "4", "--stream", - "-E", external_dir]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because external dir is missing" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: External directory is not found:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='delta', + options=["-j", "4", "--stream", "-E", external_dir], + expect_error="because external dir is missing") + self.assertMessage(contains='ERROR: External directory is not found:') sleep(1) # take DELTA without external directories - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=["-j", "4", "--stream"]) @@ -1971,13 +1622,13 @@ def test_merge_external_dir_is_missing(self): node.base_dir, exclude_dirs=['logs']) # Merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # Restore node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -1991,16 +1642,12 @@ def test_restore_external_dir_is_empty(self): restore DELRA backup, check that restored external directory is empty """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') @@ -2013,8 +1660,7 @@ def test_restore_external_dir_is_empty(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -2023,8 +1669,7 @@ def test_restore_external_dir_is_empty(self): os.remove(os.path.join(external_dir, 'file')) # take DELTA backup with empty external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=[ "-j", "4", "--stream", @@ -2037,7 +1682,7 @@ def test_restore_external_dir_is_empty(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -2051,16 +1696,12 @@ def test_merge_external_dir_is_empty(self): merge backups and restore FULL, check that restored external directory is empty """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir = self.get_tblspace_path(node, 'external_dir') @@ -2073,8 +1714,7 @@ def test_merge_external_dir_is_empty(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", external_dir]) @@ -2083,8 +1723,7 @@ def test_merge_external_dir_is_empty(self): os.remove(os.path.join(external_dir, 'file')) # take DELTA backup with empty external directory - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=[ "-j", "4", "--stream", @@ -2094,13 +1733,13 @@ def test_merge_external_dir_is_empty(self): node.base_dir, exclude_dirs=['logs']) # Merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # Restore node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -2114,16 +1753,12 @@ def test_restore_external_dir_string_order(self): restore DELRA backup, check that restored external directory is empty """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, 
ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') @@ -2141,8 +1776,7 @@ def test_restore_external_dir_string_order(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -2158,8 +1792,7 @@ def test_restore_external_dir_string_order(self): # take DELTA backup and swap external_dir_2 and external_dir_1 # in external_dir_str - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=[ "-j", "4", "--stream", @@ -2175,7 +1808,7 @@ def test_restore_external_dir_string_order(self): node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -2190,16 +1823,12 @@ def test_merge_external_dir_string_order(self): restore DELRA backup, check that restored external directory is empty """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) - shutil.rmtree(core_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') @@ -2217,8 +1846,7 @@ def test_merge_external_dir_string_order(self): f.close() # FULL backup with external directory - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ "-j", "4", "--stream", "-E", "{0}{1}{2}".format( @@ -2234,8 +1862,7 @@ def test_merge_external_dir_string_order(self): # take DELTA backup and swap external_dir_2 and external_dir_1 # in external_dir_str - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=[ "-j", "4", "--stream", @@ -2248,13 +1875,13 @@ def test_merge_external_dir_string_order(self): node.base_dir, exclude_dirs=['logs']) # Merge backups - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) # Restore node.cleanup() shutil.rmtree(node.base_dir, ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content( node.base_dir, exclude_dirs=['logs']) @@ -2269,32 +1896,28 @@ def test_smart_restore_externals(self): make sure that files from externals are not copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + 
node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # fill external directories with data - tmp_id = self.backup_node(backup_dir, 'node', node) + tmp_id = self.pb.backup_node('node', node) external_dir_1 = self.get_tblspace_path(node, 'external_dir_1') external_dir_2 = self.get_tblspace_path(node, 'external_dir_2') - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir_1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir_1, backup_id=tmp_id, + options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir_2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir_2, backup_id=tmp_id, + options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # create database node.safe_psql( @@ -2302,7 +1925,7 @@ def test_smart_restore_externals(self): "CREATE DATABASE testdb") # take FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # drop database node.safe_psql( @@ -2310,29 +1933,24 @@ def test_smart_restore_externals(self): "DROP DATABASE testdb") # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node=node, backup_type='page') # restore PAGE backup node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, + self.pb.restore_node('node', node=node, backup_id=page_id, options=['--no-validate', '--log-level-file=VERBOSE']) - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() + logfile_content = self.read_pb_log() # get delta between FULL and PAGE filelists - filelist_full = self.get_backup_filelist( - backup_dir, 'node', full_id) + filelist_full = self.get_backup_filelist(backup_dir, 'node', full_id) - filelist_page = self.get_backup_filelist( - backup_dir, 'node', page_id) + filelist_page = self.get_backup_filelist(backup_dir, 'node', page_id) filelist_diff = self.get_backup_filelist_diff( filelist_full, filelist_page) + self.assertTrue(filelist_diff, 'There should be deleted files') for file in filelist_diff: self.assertNotIn(file, logfile_content) @@ -2344,32 +1962,27 @@ def test_external_validation(self): corrupt external file in backup, run validate which should fail """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # take temp FULL backup - tmp_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + tmp_id = self.pb.backup_node('node', node, options=['--stream']) external_dir = self.get_tblspace_path(node, 'external_dir') # fill external directories with data - 
self.restore_node( - backup_dir, 'node', node, backup_id=tmp_id, - data_dir=external_dir, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir, backup_id=tmp_id, + options=["-j", "4"]) - self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + self.pb.delete('node', backup_id=tmp_id) # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, + full_id = self.pb.backup_node('node', node, options=[ '--stream', '-E', "{0}".format(external_dir)]) @@ -2378,28 +1991,15 @@ def test_external_validation(self): backup_dir, 'backups', 'node', full_id, 'external_directories', 'externaldir1', 'postgresql.auto.conf') - with open(file, "r+b", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - try: - self.validate_pb(backup_dir) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because file in external dir is corrupted" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Invalid CRC of backup file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + to_corrupt = 'external_directories/externaldir1/postgresql.auto.conf' + self.corrupt_backup_file(backup_dir, 'node', full_id, to_corrupt, + damage=(42, b"blah")) + + self.pb.validate( + expect_error="because file in external dir is corrupted") + self.assertMessage(contains='WARNING: Invalid CRC of backup file') self.assertEqual( 'CORRUPT', - self.show_pb(backup_dir, 'node', full_id)['status'], + self.pb.show('node', full_id)['status'], 'Backup STATUS should be "CORRUPT"') diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py index fbb785c60..e4e410fbf 100644 --- a/tests/false_positive_test.py +++ b/tests/false_positive_test.py @@ -2,12 +2,12 @@ import os from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -import subprocess +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb +from datetime import datetime -class FalsePositive(ProbackupTest, unittest.TestCase): +class FalsePositive(ProbackupTest): # @unittest.skip("skip") @unittest.expectedFailure @@ -15,88 +15,54 @@ def test_validate_wal_lost_segment(self): """ Loose segment located between backups. ExpectedFailure. 
This is BUG """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # make some wals node.pgbench_init(scale=5) # delete last wal segment - wals_dir = os.path.join(backup_dir, "wal", 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile( - os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals = map(int, wals) - os.remove(os.path.join(wals_dir, '0000000' + str(max(wals)))) + wals = self.get_instance_wal_list(backup_dir, 'node') + self.remove_instance_wal(backup_dir, 'node', max(wals)) # We just lost a wal segment and know nothing about it - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) self.assertTrue( - 'validation completed successfully' in self.validate_pb( - backup_dir, 'node')) + 'validation completed successfully' in self.pb.validate('node')) ######## @unittest.expectedFailure # Need to force validation of ancestor-chain def test_incremental_backup_corrupt_full_1(self): """page-level backup with corrupted full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) - file = os.path.join( - backup_dir, "backups", "node", - backup_id.decode("utf-8"), "database", "postgresql.conf") - os.remove(file) - - try: - self.backup_node(backup_dir, 'node', node, backup_type="page") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be " - "possible without valid full backup.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertEqual( - e.message, - 'ERROR: Valid full backup on current timeline is not found. ' - 'Create new FULL backup before an incremental one.\n', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertFalse( - True, - "Expecting Error because page backup should not be " - "possible without valid full backup.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertEqual( - e.message, + backup_id = self.pb.backup_node('node', node) + self.remove_backup_file(backup_dir, 'node', backup_id, + 'database/postgresql.conf') + + self.pb.backup_node('node', node, backup_type="page", + expect_error="because page backup without full is impossible") + self.assertMessage(contains= 'ERROR: Valid full backup on current timeline is not found. 
' - 'Create new FULL backup before an incremental one.\n', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 'Create new FULL backup before an incremental one.') self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR") + self.pb.show('node')[0]['Status'], "ERROR") # @unittest.skip("skip") @unittest.expectedFailure @@ -104,38 +70,28 @@ def test_pg_10_waldir(self): """ test group access for PG >= 11 """ - if self.pg_config_version < self.version_to_num('10.0'): - self.skipTest('You need PostgreSQL >= 10 for this test') - - wal_dir = os.path.join( - os.path.join(self.tmp_path, self.module_name, self.fname), 'wal_dir') + wal_dir = os.path.join(self.test_path, 'wal_dir') import shutil shutil.rmtree(wal_dir, ignore_errors=True) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=[ - '--data-checksums', - '--waldir={0}'.format(wal_dir)]) + initdb_params=['--waldir={0}'.format(wal_dir)]) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # take FULL backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) # restore backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) # compare pgdata permissions pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -145,27 +101,29 @@ def test_pg_10_waldir(self): os.path.islink(os.path.join(node_restored.data_dir, 'pg_wal')), 'pg_wal should be symlink') - @unittest.expectedFailure + # @unittest.expectedFailure + @needs_gdb # @unittest.skip("skip") def test_recovery_target_time_backup_victim(self): """ Check that for validation to recovery target probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 + + @y.sokolov: looks like this test should pass. 
+ So I commented 'expectedFailure' """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -173,9 +131,7 @@ def test_recovery_target_time_backup_victim(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - target_time = node.safe_psql( - "postgres", - "select now()").rstrip() + target_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f") node.safe_psql( "postgres", @@ -183,47 +139,48 @@ def test_recovery_target_time_backup_victim(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,100) i") - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') + gdb.signal('SIGINT') gdb.continue_execution_until_error() - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + backup_id = self.pb.show('node')[1]['id'] self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - self.validate_pb( - backup_dir, 'node', + self.pb.validate( + 'node', options=['--recovery-target-time={0}'.format(target_time)]) - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") + @needs_gdb def test_recovery_target_lsn_backup_victim(self): """ Check that for validation to recovery target probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 + + @y.sokolov: looks like this test should pass. + So I commented 'expectedFailure' """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -237,56 +194,51 @@ def test_recovery_target_lsn_backup_victim(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,100) i") - gdb = self.backup_node( - backup_dir, 'node', node, + gdb = self.pb.backup_node('node', node, options=['--log-level-console=LOG'], gdb=True) # Attention! 
This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') + gdb.signal('SIGINT') gdb.continue_execution_until_error() - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + backup_id = self.pb.show('node')[1]['id'] self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "ERROR"') self.switch_wal_segment(node) - target_lsn = self.show_pb(backup_dir, 'node', backup_id)['start-lsn'] + target_lsn = self.pb.show('node', backup_id)['start-lsn'] - self.validate_pb( - backup_dir, 'node', + self.pb.validate( + 'node', options=['--recovery-target-lsn={0}'.format(target_lsn)]) # @unittest.skip("skip") - @unittest.expectedFailure + @needs_gdb def test_streaming_timeout(self): """ Illustrate the problem of loosing exact error message because our WAL streaming engine is "borrowed" from pg_receivexlog """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_sender_timeout': '5s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, + gdb = self.pb.backup_node('node', node, gdb=True, options=['--stream', '--log-level-file=LOG']) # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one @@ -295,16 +247,10 @@ def test_streaming_timeout(self): sleep(10) gdb.continue_execution_until_error() - gdb._execute('detach') + gdb.detach() sleep(2) - log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file_path) as f: - log_content = f.read() - - self.assertIn( - 'could not receive data from WAL stream', - log_content) + log_content = self.read_pb_log() self.assertIn( 'ERROR: Problem in receivexlog', @@ -315,23 +261,12 @@ def test_streaming_timeout(self): def test_validate_all_empty_catalog(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because backup_dir is empty.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: This backup catalog contains no backup instances', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + + self.pb.validate( + expect_error="because backup_dir is empty") + self.assertMessage(contains= + 'ERROR: This backup catalog contains no backup instances') diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py index 2e5ed40e8..42f56e492 100644 --- a/tests/helpers/__init__.py +++ b/tests/helpers/__init__.py @@ -1,4 +1,4 @@ -__all__ = ['ptrack_helpers', 'cfs_helpers', 'data_helpers'] +__all__ = 
['ptrack_helpers', 'data_helpers', 'fs_backup', 'init_helpers'] import unittest @@ -6,4 +6,4 @@ if not hasattr(unittest.TestCase, "skipTest"): def skipTest(self, reason): raise unittest.SkipTest(reason) - unittest.TestCase.skipTest = skipTest \ No newline at end of file + unittest.TestCase.skipTest = skipTest diff --git a/tests/helpers/cfs_helpers.py b/tests/helpers/cfs_helpers.py deleted file mode 100644 index 31af76f2e..000000000 --- a/tests/helpers/cfs_helpers.py +++ /dev/null @@ -1,93 +0,0 @@ -import os -import re -import random -import string - - -def find_by_extensions(dirs=None, extensions=None): - """ - find_by_extensions(['path1','path2'],['.txt','.log']) - :return: - Return list of files include full path by file extensions - """ - files = [] - new_dirs = [] - - if dirs is not None and extensions is not None: - for d in dirs: - try: - new_dirs += [os.path.join(d, f) for f in os.listdir(d)] - except OSError: - if os.path.splitext(d)[1] in extensions: - files.append(d) - - if new_dirs: - files.extend(find_by_extensions(new_dirs, extensions)) - - return files - - -def find_by_pattern(dirs=None, pattern=None): - """ - find_by_pattern(['path1','path2'],'^.*/*.txt') - :return: - Return list of files include full path by pattern - """ - files = [] - new_dirs = [] - - if dirs is not None and pattern is not None: - for d in dirs: - try: - new_dirs += [os.path.join(d, f) for f in os.listdir(d)] - except OSError: - if re.match(pattern,d): - files.append(d) - - if new_dirs: - files.extend(find_by_pattern(new_dirs, pattern)) - - return files - - -def find_by_name(dirs=None, filename=None): - files = [] - new_dirs = [] - - if dirs is not None and filename is not None: - for d in dirs: - try: - new_dirs += [os.path.join(d, f) for f in os.listdir(d)] - except OSError: - if os.path.basename(d) in filename: - files.append(d) - - if new_dirs: - files.extend(find_by_name(new_dirs, filename)) - - return files - - -def corrupt_file(filename): - file_size = None - try: - file_size = os.path.getsize(filename) - except OSError: - return False - - try: - with open(filename, "rb+") as f: - f.seek(random.randint(int(0.1*file_size),int(0.8*file_size))) - f.write(random_string(0.1*file_size)) - f.close() - except OSError: - return False - - return True - - -def random_string(n): - a = string.ascii_letters + string.digits - random_str = ''.join([random.choice(a) for i in range(int(n)+1)]) - return str.encode(random_str) -# return ''.join([random.choice(a) for i in range(int(n)+1)]) diff --git a/tests/helpers/data_helpers.py b/tests/helpers/data_helpers.py index 27cb66c3d..2acf2fddb 100644 --- a/tests/helpers/data_helpers.py +++ b/tests/helpers/data_helpers.py @@ -1,7 +1,200 @@ +import os import re +import random +import string import unittest -import functools import time +from array import array +import struct + + +def find_by_extension(dir, extensions, backup_dir=None): + """ + find_by_extensions('path1',['.txt','.log']) + + Add backup_dir if we need to check files from backup folder + :return: + Return list of files by file extensions. + If backup_dir is not passed, then file path include full path. + Otherwise file path is relative to backup_dir. 
+ """ + if isinstance(extensions, str): + extensions = [extensions] + + if backup_dir is not None: + return [obj for obj in backup_dir.list_files(dir, recursive=True) + if os.path.splitext(obj)[1] in extensions] + + return [os.path.join(rootdir, obj) + for rootdir, dirs, objs in os.walk(dir, followlinks=True) + for obj in objs + if os.path.splitext(obj)[1] in extensions] + +def find_by_pattern(dir, pattern, backup_dir=None): + """ + find_by_pattern('path1','^.*/*.txt') + :return: + Return list of files include full path by pattern + """ + if backup_dir is not None: + return [obj for obj in backup_dir.list_files(dir, recursive=True) + if re.match(pattern, obj)] + + objs = (os.path.join(rootdir, obj) + for rootdir, dirs, objs in os.walk(dir, followlinks=True) + for obj in objs) + return [obj for obj in objs if re.match(pattern, obj)] + +def find_by_name(dir, filenames, backup_dir=None): + if isinstance(filenames, str): + filenames = [filenames] + + if backup_dir is not None: + return [obj for obj in backup_dir.list_files(dir, recursive=True) + if os.path.basename(obj) in filenames] + + return [os.path.join(rootdir, obj) + for rootdir, dirs, objs in os.walk(dir, followlinks=True) + for obj in objs + if obj in filenames] + + +def get_page_size(filename): + # fixed PostgreSQL page header size + PAGE_HEADER_SIZE = 24 + with open(filename, "rb+") as f: + page_header = f.read(PAGE_HEADER_SIZE) + assert len(page_header) == PAGE_HEADER_SIZE + + size = struct.unpack('H', page_header[18:20])[0] & 0xff00 + assert (size & (size - 1)) == 0 + + return size + + +def pg_checksum_block(raw_page, blkno): + N_SUMS = 32 + # prime multiplier of FNV-1a hash + FNV_PRIME = 16777619 + MASK = (1<<32) - 1 + + # Set pd_checksum to zero, so that the checksum calculation isn't + # affected by the old checksum stored on the page. + assert array('I').itemsize == 4 + page = array('I', raw_page[:8] + bytes([0, 0]) + raw_page[10:]) + + assert len(page) % N_SUMS == 0 + + sums = [ + 0x5B1F36E9, 0xB8525960, 0x02AB50AA, 0x1DE66D2A, + 0x79FF467A, 0x9BB9F8A3, 0x217E7CD2, 0x83E13D2C, + 0xF8D4474F, 0xE39EB970, 0x42C6AE16, 0x993216FA, + 0x7B093B5D, 0x98DAFF3C, 0xF718902A, 0x0B1C9CDB, + 0xE58F764B, 0x187636BC, 0x5D7B3BB1, 0xE73DE7DE, + 0x92BEC979, 0xCCA6C0B2, 0x304A0979, 0x85AA43D4, + 0x783125BB, 0x6CA8EAA2, 0xE407EAC6, 0x4B5CFC3E, + 0x9FBF8C76, 0x15CA20BE, 0xF2CA9FD3, 0x959BD756 + ] + + def mix2sum(s, v): + tmp = s ^ v + return ((tmp * FNV_PRIME) & MASK) ^ (tmp >> 17) + + def mix_chunk2sums(sums, values): + return [mix2sum(s, v) for s, v in zip(sums, values)] + + # main checksum calculation + for i in range(0, len(page), N_SUMS): + sums = mix_chunk2sums(sums, page[i:i+N_SUMS]) + + # finally add in two rounds of zeroes for additional mixing + for _ in range(2): + sums = mix_chunk2sums(sums, [0] * N_SUMS) + + # xor fold partial checksums together + result = blkno + for s in sums: + result ^= s + + return result % 65535 + 1 + + +def validate_data_file(filename, blcksz = 0) -> bool: + file_size = os.path.getsize(filename) + if blcksz == 0: + blcksz = get_page_size(filename) + assert file_size % blcksz == 0 + + # determine positional number of first page based on segment number + fname = os.path.basename(filename) + if '.' 
in fname: + segno = int(fname.rsplit('.', 1)[1]) + # Hardwired segments size 1GB + basepage = (1<<30) / blcksz * segno + else: + basepage = 0 + + with open(filename, "rb") as f: + for blckno in range(file_size // blcksz): + raw_page = f.read(blcksz) + if len(raw_page) == 0: + break + if len(raw_page) != blcksz: + return False + checksum = struct.unpack('H', raw_page[8:10])[0] + + calculated_checksum = pg_checksum_block(raw_page, basepage + blckno) + if checksum != calculated_checksum: + return False + + return True + + +def corrupt_data_file(filename): + blcksz = get_page_size(filename) + try: + while True: + if not corrupt_file(filename): + return False + if not validate_data_file(filename, blcksz): + return True + except OSError: + return False + + +def corrupt_file(filename): + file_size = None + try: + file_size = os.path.getsize(filename) + + with open(filename, "rb+") as f: + pos = random.randint(int(0.1*file_size),int(0.8*file_size)) + len = int(0.1 * file_size) + 1 + f.seek(pos) + old = f.read(len) + new = random_string(len, old) + f.seek(pos) + f.write(new) + except OSError: + return False + + return True + + +def random_string(n, old_bytes=b''): + """ + Generate random string so that it's not equal neither to old bytes nor + to casefold text of these bytes + """ + old_str = old_bytes.decode('latin-1', errors='replace').casefold() + template = string.ascii_letters + string.digits + random_bytes = old_bytes + random_str = old_str + while random_bytes == old_bytes or random_str.casefold() == old_str: + random_str = ''.join([random.choice(template) for i in range(int(n))]) + random_bytes = str.encode(random_str) + return random_bytes + def _tail_file(file, linetimeout, totaltimeout): start = time.time() @@ -76,3 +269,6 @@ def wait(self, *, contains:str = None, regex:str = None): def wait_shutdown(self): self.wait(contains='database system is shut down') + + def wait_archive_push_completed(self): + self.wait(contains='archive-push completed successfully') diff --git a/tests/helpers/enums/date_time_enum.py b/tests/helpers/enums/date_time_enum.py new file mode 100644 index 000000000..b96a58ec6 --- /dev/null +++ b/tests/helpers/enums/date_time_enum.py @@ -0,0 +1,7 @@ +from enum import Enum + + +class DateTimePattern(Enum): + # 2022-12-30 14:07:30+01 + Y_m_d_H_M_S_z_dash = '%Y-%m-%d %H:%M:%S%z' + Y_m_d_H_M_S_f_z_dash = '%Y-%m-%d %H:%M:%S.%f%z' diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6b665097c..356ca8da0 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,21 +1,34 @@ # you need os for unittest to work +import gzip +import io import os -import gc +import threading import unittest -from sys import exit, argv, version_info -import signal -import subprocess import shutil -import six +import sys + import testgres +from testgres.enums import NodeStatus import hashlib -import re -import getpass -import select -from time import sleep +import time import re import json -import random +import contextlib + +from pg_probackup2.gdb import GDBobj +from pg_probackup2.init_helpers import init_params +from pg_probackup2.app import ProbackupApp +from pg_probackup2.storage.fs_backup import TestBackupDir, FSTestBackupDir + +try: + import lz4.frame +except ImportError: + pass + +try: + import zstd +except ImportError: + pass idx_ptrack = { 't_heap': { @@ -63,349 +76,133 @@ } } -warning = """ -Wrong splint in show_pb -Original Header: -{header} -Original Body: -{body} -Splitted Header -{header_split} -Splitted Body -{body_split} 
-""" +def load_backup_class(fs_type): + fs_type = os.environ.get('PROBACKUP_FS_TYPE') + implementation = f"{__package__}.fs_backup.FSTestBackupDir" + if fs_type: + implementation = fs_type + + print("Using ", implementation) + module_name, class_name = implementation.rsplit(sep='.', maxsplit=1) + + module = importlib.import_module(module_name) + + return getattr(module, class_name) + + +fs_backup_class = FSTestBackupDir +if os.environ.get('PROBACKUP_FS_TYPE'): + fs_backup_class = load_backup_class(os.environ.get('PROBACKUP_FS_TYPE')) +# Run tests on s3 when we have PG_PROBACKUP_S3_TEST (minio, vk...) or PG_PROBACKUP_S3_CONFIG_FILE. +# If PG_PROBACKUP_S3_CONFIG_FILE is 'True', then using default conf file. Check config_provider.py +elif (os.environ.get('PG_PROBACKUP_S3_TEST') and os.environ.get('PG_PROBACKUP_S3_HOST') or + os.environ.get('PG_PROBACKUP_S3_CONFIG_FILE')): + root = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..')) + if root not in sys.path: + sys.path.append(root) + from s3.test_utils.s3_backup import S3TestBackupDir + fs_backup_class = S3TestBackupDir def dir_files(base_dir): out_list = [] for dir_name, subdir_list, file_list in os.walk(base_dir): - if dir_name != base_dir: - out_list.append(os.path.relpath(dir_name, base_dir)) + rel_dir = os.path.relpath(dir_name, base_dir) + if rel_dir != '.': + out_list.append(rel_dir) for fname in file_list: out_list.append( os.path.relpath(os.path.join( dir_name, fname), base_dir) - ) + ) out_list.sort() return out_list -def is_pgpro(): - # pg_config --help - cmd = [os.environ['PG_CONFIG'], '--help'] - - result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) - return b'postgrespro' in result.stdout - - -def is_enterprise(): - # pg_config --help - cmd = [os.environ['PG_CONFIG'], '--help'] - - p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) - # PostgresPro std or ent - if b'postgrespro' in p.stdout: - cmd = [os.environ['PG_CONFIG'], '--pgpro-edition'] - p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) - - return b'enterprise' in p.stdout - else: # PostgreSQL - return False - - -def is_nls_enabled(): - cmd = [os.environ['PG_CONFIG'], '--configure'] - - result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) - return b'enable-nls' in result.stdout - - def base36enc(number): """Converts an integer to a base36 string.""" - alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' - base36 = '' - sign = '' - if number < 0: - sign = '-' - number = -number - - if 0 <= number < len(alphabet): - return sign + alphabet[number] + return '-' + base36enc(-number) - while number != 0: + alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + base36 = '' + while number >= len(alphabet): number, i = divmod(number, len(alphabet)) - base36 = alphabet[i] + base36 - - return sign + base36 - - -class ProbackupException(Exception): - def __init__(self, message, cmd): - self.message = message - self.cmd = cmd - - def __str__(self): - return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd) - -class PostgresNodeExtended(testgres.PostgresNode): - - def __init__(self, base_dir=None, *args, **kwargs): - super(PostgresNodeExtended, self).__init__(name='test', base_dir=base_dir, *args, **kwargs) - self.is_started = False + base36 += alphabet[i] + base36 += alphabet[number] + return base36[::-1] - def slow_start(self, replica=False): - # wait for https://github.com/postgrespro/testgres/pull/50 - # self.start() - # 
self.poll_query_until( - # "postgres", - # "SELECT not pg_is_in_recovery()", - # suppress={testgres.NodeConnection}) - if replica: - query = 'SELECT pg_is_in_recovery()' - else: - query = 'SELECT not pg_is_in_recovery()' - - self.start() - while True: - try: - output = self.safe_psql('template1', query).decode("utf-8").rstrip() - - if output == 't': - break - - except testgres.QueryException as e: - if 'database system is starting up' in e.message: - pass - elif 'FATAL: the database system is not accepting connections' in e.message: - pass - elif replica and 'Hot standby mode is disabled' in e.message: - raise e - else: - raise e - - sleep(0.5) - - def start(self, *args, **kwargs): - if not self.is_started: - super(PostgresNodeExtended, self).start(*args, **kwargs) - self.is_started = True - return self - - def stop(self, *args, **kwargs): - if self.is_started: - result = super(PostgresNodeExtended, self).stop(*args, **kwargs) - self.is_started = False - return result - - def kill(self, someone = None): - if self.is_started: - sig = signal.SIGKILL if os.name != 'nt' else signal.SIGBREAK - if someone == None: - os.kill(self.pid, sig) - else: - os.kill(self.auxiliary_pids[someone][0], sig) - self.is_started = False - - def table_checksum(self, table, dbname="postgres"): - con = self.connect(dbname=dbname) - - curname = "cur_"+str(random.randint(0,2**48)) +def base36dec(id): + return int(id, 36) - con.execute(""" - DECLARE %s NO SCROLL CURSOR FOR - SELECT t::text FROM %s as t - """ % (curname, table)) - sum = hashlib.md5() - while True: - rows = con.execute("FETCH FORWARD 5000 FROM %s" % curname) - if not rows: - break - for row in rows: - # hash uses SipHash since Python3.4, therefore it is good enough - sum.update(row[0].encode('utf8')) - - con.execute("CLOSE %s; ROLLBACK;" % curname) - - con.close() - return sum.hexdigest() - -class ProbackupTest(object): +class ProbackupTest(unittest.TestCase): # Class attributes - enterprise = is_enterprise() - enable_nls = is_nls_enabled() - pgpro = is_pgpro() + enterprise = init_params.is_enterprise + shardman = init_params.is_shardman + enable_nls = init_params.is_nls_enabled + enable_lz4 = init_params.is_lz4_enabled + pgpro = init_params.is_pgpro + verbose = init_params.verbose + username = init_params.username + remote = init_params.remote + ptrack = init_params.ptrack + paranoia = init_params.paranoia + tests_source_path = os.path.join(init_params.source_path, 'tests') + archive_compress = init_params.archive_compress + compress_suffix = init_params.compress_suffix + pg_config_version = init_params.pg_config_version + probackup_path = init_params.probackup_path + probackup_old_path = init_params.probackup_old_path + probackup_version = init_params.probackup_version + old_probackup_version = init_params.old_probackup_version + cfs_compress_default = init_params.cfs_compress + EXTERNAL_DIRECTORY_DELIMITER = init_params.EXTERNAL_DIRECTORY_DELIMITER + s3_type = os.environ.get('PG_PROBACKUP_S3_TEST') + + auto_compress_alg = True def __init__(self, *args, **kwargs): - super(ProbackupTest, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) + self.output = None + self.cmd = None self.nodes_to_cleanup = [] if isinstance(self, unittest.TestCase): - self.module_name = self.id().split('.')[1] - self.fname = self.id().split('.')[3] - - if '-v' in argv or '--verbose' in argv: - self.verbose = True - else: - self.verbose = False - - self.test_env = os.environ.copy() - envs_list = [ - 'LANGUAGE', - 'LC_ALL', - 'PGCONNECT_TIMEOUT', - 'PGDATA', - 
'PGDATABASE', - 'PGHOSTADDR', - 'PGREQUIRESSL', - 'PGSERVICE', - 'PGSSLMODE', - 'PGUSER', - 'PGPORT', - 'PGHOST' - ] - - for e in envs_list: try: - del self.test_env[e] - except: - pass - - self.test_env['LC_MESSAGES'] = 'C' - self.test_env['LC_TIME'] = 'C' - - self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \ - self.test_env['PGPROBACKUP_GDB'] == 'ON' - - self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \ - self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON' - - self.archive_compress = 'ARCHIVE_COMPRESSION' in self.test_env and \ - self.test_env['ARCHIVE_COMPRESSION'] == 'ON' - - try: - testgres.configure_testgres( - cache_initdb=False, - cached_initdb_dir=False, - cache_pg_config=False, - node_cleanup_full=False) - except: - pass - - self.helpers_path = os.path.dirname(os.path.realpath(__file__)) - self.dir_path = os.path.abspath( - os.path.join(self.helpers_path, os.pardir) - ) - self.tmp_path = os.path.abspath( - os.path.join(self.dir_path, 'tmp_dirs') - ) - try: - os.makedirs(os.path.join(self.dir_path, 'tmp_dirs')) - except: - pass - - self.user = self.get_username() - self.probackup_path = None - if 'PGPROBACKUPBIN' in self.test_env: - if shutil.which(self.test_env["PGPROBACKUPBIN"]): - self.probackup_path = self.test_env["PGPROBACKUPBIN"] - else: - if self.verbose: - print('PGPROBACKUPBIN is not an executable file') - - if not self.probackup_path: - probackup_path_tmp = os.path.join( - testgres.get_pg_config()['BINDIR'], 'pg_probackup') - - if os.path.isfile(probackup_path_tmp): - if not os.access(probackup_path_tmp, os.X_OK): - print('{0} is not an executable file'.format( - probackup_path_tmp)) - else: - self.probackup_path = probackup_path_tmp - - if not self.probackup_path: - probackup_path_tmp = os.path.abspath(os.path.join( - self.dir_path, '../pg_probackup')) - - if os.path.isfile(probackup_path_tmp): - if not os.access(probackup_path_tmp, os.X_OK): - print('{0} is not an executable file'.format( - probackup_path_tmp)) - else: - self.probackup_path = probackup_path_tmp - - if not self.probackup_path: - print('pg_probackup binary is not found') - exit(1) - - if os.name == 'posix': - self.EXTERNAL_DIRECTORY_DELIMITER = ':' - os.environ['PATH'] = os.path.dirname( - self.probackup_path) + ':' + os.environ['PATH'] - - elif os.name == 'nt': - self.EXTERNAL_DIRECTORY_DELIMITER = ';' - os.environ['PATH'] = os.path.dirname( - self.probackup_path) + ';' + os.environ['PATH'] - - self.probackup_old_path = None - - if 'PGPROBACKUPBIN_OLD' in self.test_env: - if ( - os.path.isfile(self.test_env['PGPROBACKUPBIN_OLD']) and - os.access(self.test_env['PGPROBACKUPBIN_OLD'], os.X_OK) - ): - self.probackup_old_path = self.test_env['PGPROBACKUPBIN_OLD'] - else: - if self.verbose: - print('PGPROBACKUPBIN_OLD is not an executable file') - - self.probackup_version = None - self.old_probackup_version = None - - try: - self.probackup_version_output = subprocess.check_output( - [self.probackup_path, "--version"], - stderr=subprocess.STDOUT, - ).decode('utf-8') - except subprocess.CalledProcessError as e: - raise ProbackupException(e.output.decode('utf-8')) - - if self.probackup_old_path: - old_probackup_version_output = subprocess.check_output( - [self.probackup_old_path, "--version"], - stderr=subprocess.STDOUT, - ).decode('utf-8') - self.old_probackup_version = re.search( - r"\d+\.\d+\.\d+", - subprocess.check_output( - [self.probackup_old_path, "--version"], - stderr=subprocess.STDOUT, - ).decode('utf-8') - ).group(0) - - self.probackup_version = re.search(r"\d+\.\d+\.\d+", 
self.probackup_version_output).group(0) - - self.remote = False - self.remote_host = None - self.remote_port = None - self.remote_user = None - - if 'PGPROBACKUP_SSH_REMOTE' in self.test_env: - if self.test_env['PGPROBACKUP_SSH_REMOTE'] == 'ON': - self.remote = True - - self.ptrack = False - if 'PG_PROBACKUP_PTRACK' in self.test_env: - if self.test_env['PG_PROBACKUP_PTRACK'] == 'ON': - if self.pg_config_version >= self.version_to_num('11.0'): - self.ptrack = True - - os.environ["PGAPPNAME"] = "pg_probackup" + self.module_name = self.id().split('.')[-2] + self.fname = self.id().split('.')[-1] + except IndexError: + print("Couldn't get module name and function name from self.id(): `{}`".format(self.id())) + self.module_name = self.module_name if self.module_name else str(self).split('(')[1].split('.')[1] + self.fname = str(self).split('(')[0] + + self.test_env = init_params.test_env() + + if self.s3_type != "minio": + if 'PG_PROBACKUP_S3_HOST' in self.test_env: + del(self.test_env['PG_PROBACKUP_S3_HOST']) + if 'PG_PROBACKUP_S3_PORT' in self.test_env: + del(self.test_env['PG_PROBACKUP_S3_PORT']) + + self.rel_path = os.path.join(self.module_name, self.fname) + self.test_path = os.path.join(init_params.tmp_path, self.rel_path) + + self.pg_node = testgres.NodeApp(self.test_path, self.nodes_to_cleanup) + self.pg_node.os_ops.set_env('LANGUAGE','en') + + # Cleanup FS dependent part first + self.backup_dir = self.build_backup_dir('backup') + self.backup_dir.cleanup() + # Recreate the rest which should reside on local file system only + shutil.rmtree(self.test_path, ignore_errors=True) + os.makedirs(self.test_path) + + self.pb_log_path = os.path.join(self.test_path, "pb_log") + self.pb = ProbackupApp(self, self.pg_node, self.pb_log_path, self.test_env, + self.auto_compress_alg, self.backup_dir) def is_test_result_ok(test_case): # sources of solution: @@ -414,29 +211,39 @@ def is_test_result_ok(test_case): # # 2. 
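The comment block above cites where the outcome-inspection trick comes from, and the hunk that follows rewrites is_test_result_ok around it. In isolation the idea looks roughly like the sketch below; _outcome, _feedErrorsToResult and result.errors/failures are CPython unittest internals (not a public API), used here the same way the updated helper uses them.

    import unittest

    class OutcomeAwareCase(unittest.TestCase):
        def _test_passed_so_far(self):
            outcome = self._outcome
            if hasattr(outcome, 'errors'):
                # Python 3.4 - 3.10: errors are buffered on the outcome object
                result = self.defaultTestResult()
                self._feedErrorsToResult(result, outcome.errors)
            else:
                # Python 3.11+ (and recent pytest): the live result is exposed directly
                result = outcome.result
            failed = [test for test, _ in
                      getattr(result, 'errors', []) + getattr(result, 'failures', [])]
            return self not in failed

        def tearDown(self):
            if self._test_passed_so_far():
                pass  # e.g. clean up temporary directories, as the tearDown below does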
python versions 3.11+ mixin, verified on 3.11, taken from: https://stackoverflow.com/a/39606065 - if not isinstance(test_case, unittest.TestCase): - raise AssertionError("test_case is not instance of unittest.TestCase") - - if hasattr(test_case, '_outcome'): # Python 3.4+ - if hasattr(test_case._outcome, 'errors'): - # Python 3.4 - 3.10 (These two methods have no side effects) - result = test_case.defaultTestResult() # These two methods have no side effects - test_case._feedErrorsToResult(result, test_case._outcome.errors) - else: - # Python 3.11+ - result = test_case._outcome.result - else: # Python 2.7, 3.0-3.3 - result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) + if hasattr(test_case._outcome, 'errors'): + # Python 3.4 - 3.10 (These two methods have no side effects) + result = test_case.defaultTestResult() # These two methods have no side effects + test_case._feedErrorsToResult(result, test_case._outcome.errors) + else: + # Python 3.11+ and pytest 5.3.5+ + result = test_case._outcome.result + if not hasattr(result, 'errors'): + result.errors = [] + if not hasattr(result, 'failures'): + result.failures = [] ok = all(test != test_case for test, text in result.errors + result.failures) + # check subtests as well + ok = ok and all(getattr(test, 'test_case', None) != test_case + for test, text in result.errors + result.failures) + + # for pytest 8+ + if hasattr(result, '_excinfo'): + if result._excinfo is not None and len(result._excinfo) > 0: + # if test was successful, _excinfo will be None, else it will be non-empty list + ok = False return ok def tearDown(self): + node_crashed = None if self.is_test_result_ok(): for node in self.nodes_to_cleanup: + if node.is_started and node.status() != NodeStatus.Running: + node_crashed = node node.cleanup() - self.del_test_dir(self.module_name, self.fname) + self.del_test_dirs() else: for node in self.nodes_to_cleanup: @@ -446,143 +253,27 @@ def tearDown(self): self.nodes_to_cleanup.clear() - @property - def pg_config_version(self): - return self.version_to_num( - testgres.get_pg_config()['VERSION'].split(" ")[1]) - -# if 'PGPROBACKUP_SSH_HOST' in self.test_env: -# self.remote_host = self.test_env['PGPROBACKUP_SSH_HOST'] -# else -# print('PGPROBACKUP_SSH_HOST is not set') -# exit(1) -# -# if 'PGPROBACKUP_SSH_PORT' in self.test_env: -# self.remote_port = self.test_env['PGPROBACKUP_SSH_PORT'] -# else -# print('PGPROBACKUP_SSH_PORT is not set') -# exit(1) -# -# if 'PGPROBACKUP_SSH_USER' in self.test_env: -# self.remote_user = self.test_env['PGPROBACKUP_SSH_USER'] -# else -# print('PGPROBACKUP_SSH_USER is not set') -# exit(1) - - def make_empty_node( - self, - base_dir=None): - real_base_dir = os.path.join(self.tmp_path, base_dir) - shutil.rmtree(real_base_dir, ignore_errors=True) - os.makedirs(real_base_dir) - - node = PostgresNodeExtended(base_dir=real_base_dir) - node.should_rm_dirs = True - self.nodes_to_cleanup.append(node) - - return node - - def make_simple_node( - self, - base_dir=None, - set_replication=False, - ptrack_enable=False, - initdb_params=[], - pg_options={}): - - node = self.make_empty_node(base_dir) - node.init( - initdb_params=initdb_params, allow_streaming=set_replication) - - # set major version - with open(os.path.join(node.data_dir, 'PG_VERSION')) as f: - node.major_version_str = str(f.read().rstrip()) - node.major_version = float(node.major_version_str) - - # Sane default parameters - options = {} - options['max_connections'] = 100 - options['shared_buffers'] = '10MB' - options['fsync'] = 'off' 
- - options['wal_level'] = 'logical' - options['hot_standby'] = 'off' - - options['log_line_prefix'] = '%t [%p]: [%l-1] ' - options['log_statement'] = 'none' - options['log_duration'] = 'on' - options['log_min_duration_statement'] = 0 - options['log_connections'] = 'on' - options['log_disconnections'] = 'on' - options['restart_after_crash'] = 'off' - options['autovacuum'] = 'off' - - # Allow replication in pg_hba.conf - if set_replication: - options['max_wal_senders'] = 10 - - if ptrack_enable: - options['ptrack.map_size'] = '128' - options['shared_preload_libraries'] = 'ptrack' - - if node.major_version >= 13: - options['wal_keep_size'] = '200MB' - else: - options['wal_keep_segments'] = '100' + if node_crashed: + self.fail(f"Node '{os.path.relpath(node.base_dir, self.test_path)}' unexpectingly crashed") - # set default values - self.set_auto_conf(node, options) + def build_backup_dir(self, backup='backup'): + return fs_backup_class(rel_path=self.rel_path, backup=backup) - # Apply given parameters - self.set_auto_conf(node, pg_options) + def read_pb_log(self): + with open(os.path.join(self.pb_log_path, 'pg_probackup.log')) as fl: + return fl.read() - # kludge for testgres - # https://github.com/postgrespro/testgres/issues/54 - # for PG >= 13 remove 'wal_keep_segments' parameter - if node.major_version >= 13: - self.set_auto_conf( - node, {}, 'postgresql.conf', ['wal_keep_segments']) + def unlink_pg_log(self): + os.unlink(os.path.join(self.pb_log_path, 'pg_probackup.log')) - return node - def simple_bootstrap(self, node, role) -> None: node.safe_psql( 'postgres', 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role)) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + if 
self.pg_config_version < 150000: node.safe_psql( 'postgres', 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' @@ -620,12 +311,12 @@ def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False 'select exists' " (select 1 from pg_tablespace where spcname = '{0}')".format( tblspc_name) - ) + ) # Check that tablespace with name 'tblspc_name' do not exists already self.assertFalse( res[0][0], 'Tablespace "{0}" already exists'.format(tblspc_name) - ) + ) if not tblspc_path: tblspc_path = os.path.join( @@ -633,7 +324,13 @@ def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format( tblspc_name, tblspc_path) if cfs: - cmd += ' with (compression=true)' + + if cfs is True and self.cfs_compress_default: + cfs = self.cfs_compress_default + if cfs is True or node.major_version < 12: + cmd += ' with (compression=true)' + else: + cmd += ' with (compression=' + cfs + ')' if not os.path.exists(tblspc_path): os.makedirs(tblspc_path) @@ -649,12 +346,12 @@ def drop_tblspace(self, node, tblspc_name): 'select exists' " (select 1 from pg_tablespace where spcname = '{0}')".format( tblspc_name) - ) + ) # Check that tablespace with name 'tblspc_name' do not exists already self.assertTrue( res[0][0], 'Tablespace "{0}" do not exists'.format(tblspc_name) - ) + ) rels = node.execute( "postgres", @@ -671,7 +368,6 @@ def drop_tblspace(self, node, tblspc_name): 'postgres', 'DROP TABLESPACE {0}'.format(tblspc_name)) - def get_tblspace_path(self, node, tblspc_name): return os.path.join(node.base_dir, tblspc_name) @@ -686,13 +382,13 @@ def get_fork_path(self, node, fork_name): 'postgres', "select pg_relation_filepath('{0}')".format( fork_name))[0][0] - ) + ) def get_md5_per_page_for_fork(self, file, size_in_pages): pages_per_segment = {} md5_per_page = {} size_in_pages = int(size_in_pages) - nsegments = int(size_in_pages/131072) + nsegments = int(size_in_pages / 131072) if size_in_pages % 131072 != 0: nsegments = nsegments + 1 @@ -712,9 +408,9 @@ def get_md5_per_page_for_fork(self, file, size_in_pages): end_page = pages_per_segment[segment_number] else: file_desc = os.open( - file+'.{0}'.format(segment_number), os.O_RDONLY - ) - start_page = max(md5_per_page)+1 + file + '.{0}'.format(segment_number), os.O_RDONLY + ) + start_page = max(md5_per_page) + 1 end_page = end_page + pages_per_segment[segment_number] for page in range(start_page, end_page): @@ -726,38 +422,34 @@ def get_md5_per_page_for_fork(self, file, size_in_pages): return md5_per_page - def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]): + def get_ptrack_bits_per_page_for_fork(self, node, file, size=None): - if self.get_pgpro_edition(node) == 'enterprise': - if self.get_version(node) < self.version_to_num('10.0'): - header_size = 48 - else: - header_size = 24 - else: - header_size = 24 + if size is None: + size = [] + header_size = 24 ptrack_bits_for_fork = [] # TODO: use macro instead of hard coded 8KB - page_body_size = 8192-header_size + page_body_size = 8192 - header_size # Check that if main fork file size is 0, it`s ok # to not having a _ptrack fork if os.path.getsize(file) == 0: return ptrack_bits_for_fork byte_size = os.path.getsize(file + '_ptrack') - npages = int(byte_size/8192) + npages = int(byte_size / 8192) if byte_size % 8192 != 0: print('Ptrack page is not 8k aligned') - exit(1) + sys.exit(1) file = os.open(file + '_ptrack', os.O_RDONLY) for page in range(npages): - offset = 8192*page+header_size + offset = 8192 * page + header_size 
os.lseek(file, offset, 0) lots_of_bytes = os.read(file, page_body_size) byte_list = [ - lots_of_bytes[i:i+1] for i in range(len(lots_of_bytes)) - ] + lots_of_bytes[i:i + 1] for i in range(len(lots_of_bytes)) + ] for byte in byte_list: # byte_inverted = bin(int(byte, base=16))[2:][::-1] # bits = (byte >> x) & 1 for x in range(7, -1, -1) @@ -832,7 +524,7 @@ def check_ptrack_sanity(self, idx_dict): # corresponding page in old_pages are been dealt with. # We can now safely proceed to comparing old and new pages if idx_dict['new_pages'][ - PageNum] != idx_dict['old_pages'][PageNum]: + PageNum] != idx_dict['old_pages'][PageNum]: # Page has been changed, # meaning that ptrack should be equal to 1 if idx_dict['ptrack'][PageNum] != 1: @@ -878,23 +570,82 @@ def check_ptrack_sanity(self, idx_dict): # ) def get_backup_filelist(self, backup_dir, instance, backup_id): - - filelist_path = os.path.join( - backup_dir, 'backups', - instance, backup_id, 'backup_content.control') - - with open(filelist_path, 'r') as f: - filelist_raw = f.read() - - filelist_splitted = filelist_raw.splitlines() + path = os.path.join('backups', instance, backup_id, 'backup_content.control') + filelist_raw = backup_dir.read_file(path) filelist = {} - for line in filelist_splitted: + for line in io.StringIO(filelist_raw): line = json.loads(line) filelist[line['path']] = line return filelist + def get_backup_listdir(self, backup_dir, instance, backup_id, sub_path): + subpath = os.path.join('backups', instance, backup_id, sub_path) + return backup_dir.list_files(subpath) + + def get_backups_dirs(self, backup_dir, instance): + subpath = os.path.join("backups", instance) + return backup_dir.list_dirs(subpath) + + def read_backup_file(self, backup_dir, instance, backup_id, + sub_path, *, text=False): + subpath = os.path.join('backups', instance, backup_id, sub_path) + return backup_dir.read_file(subpath, text=text) + + def write_backup_file(self, backup_dir, instance, backup_id, + sub_path, content, *, text=False): + subpath = os.path.join('backups', instance, backup_id, sub_path) + return backup_dir.write_file(subpath, content, text=text) + + def corrupt_backup_file(self, backup_dir, instance, backup_id, sub_path, *, + damage: tuple = None, + truncate: int = None, + overwrite=None, + text=False): + subpath = os.path.join('backups', instance, backup_id, sub_path) + if overwrite: + content = overwrite + elif truncate == 0: + content = '' if text else b'' + else: + content = backup_dir.read_file(subpath, text=text) + if damage: + pos, replace = damage + content = content[:pos] + replace + content[pos + len(replace):] + if truncate is not None: + content = content[:truncate] + backup_dir.write_file(subpath, content, text=text) + + def remove_backup_file(self, backup_dir, instance, backup_id, sub_path): + subpath = os.path.join('backups', instance, backup_id, sub_path) + backup_dir.remove_file(subpath) + + def backup_file_exists(self, backup_dir, instance, backup_id, sub_path): + subpath = os.path.join('backups', instance, backup_id, sub_path) + return backup_dir.exists(subpath) + + def remove_backup_config(self, backup_dir, instance): + subpath = os.path.join('backups', instance, 'pg_probackup.conf') + backup_dir.remove_file(subpath) + + @contextlib.contextmanager + def modify_backup_config(self, backup_dir, instance): + path = os.path.join('backups', instance, 'pg_probackup.conf') + control_file = backup_dir.read_file(path) + cf = ProbackupTest.ControlFileContainer(control_file) + yield cf + if control_file != cf.data: + 
backup_dir.write_file(path, cf.data) + + def remove_one_backup(self, backup_dir, instance, backup_id): + subpath = os.path.join('backups', instance, backup_id) + backup_dir.remove_dir(subpath) + + def remove_one_backup_instance(self, backup_dir, instance): + subpath = os.path.join('backups', instance) + backup_dir.remove_dir(subpath) + # return dict of files from filelist A, # which are not exists in filelist_B def get_backup_filelist_diff(self, filelist_A, filelist_B): @@ -906,6 +657,62 @@ def get_backup_filelist_diff(self, filelist_A, filelist_B): return filelist_diff + def get_instance_wal_list(self, backup_dir, instance): + files = map(str, backup_dir.list_files(os.path.join('wal', instance))) + files = [f for f in files + if not any(x in f for x in ('.backup', '.history', '~tmp'))] + files.sort() + return files + + def read_instance_wal(self, backup_dir, instance, file, decompress=False): + content = backup_dir.read_file(f'wal/{instance}/{file}', text=False) + if decompress: + content = _do_decompress(file, content) + return content + + def write_instance_wal(self, backup_dir, instance, file, data, compress=False): + if compress: + data = _do_compress(file, data) + return backup_dir.write_file(f'wal/{instance}/{file}', data, text=False) + + def corrupt_instance_wal(self, backup_dir, instance, file, pos, damage, decompressed=False): + subpath = f'wal/{instance}/{file}' + content = backup_dir.read_file(subpath, text=False) + if decompressed: + content = _do_decompress(subpath, content) + content = content[:pos] + \ + bytes(d^c for d, c in zip(content[pos:pos+len(damage)], damage)) + \ + content[pos + len(damage):] + if decompressed: + content = _do_compress(subpath, content) + backup_dir.write_file(subpath, content, text=False) + + def remove_instance_wal(self, backup_dir, instance, file): + backup_dir.remove_file(f'wal/{instance}/{file}') + + def instance_wal_exists(self, backup_dir, instance, file): + fl = f'wal/{instance}/{file}' + return backup_dir.exists(fl) + + def wait_instance_wal_exists(self, backup_dir, instance, file, timeout=300): + start = time.time() + fl = f'wal/{instance}/{file}' + while time.time() - start < timeout: + if backup_dir.exists(fl): + break + time.sleep(0.25) + + def wait_server_wal_exists(self, data_dir, wal_dir, file, timeout=300): + start = time.time() + fl = f'{data_dir}/{wal_dir}/{file}' + while time.time() - start < timeout: + if os.path.exists(fl): + return + time.sleep(0.25) + + def remove_instance_waldir(self, backup_dir, instance): + backup_dir.remove_dir(f'wal/{instance}') + # used for partial restore def truncate_every_file_in_dir(self, path): for file in os.listdir(path): @@ -942,487 +749,43 @@ def check_ptrack_clean(self, idx_dict, size): ) ) - def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, return_id=True, env=None): - if not self.probackup_old_path and old_binary: - print('PGPROBACKUPBIN_OLD is not set') - exit(1) - - if old_binary: - binary_path = self.probackup_old_path - else: - binary_path = self.probackup_path + def read_backup_content_control(self, backup_id, instance_name): + """ + Read the content control file of a backup. + Args: backup_id (str): The ID of the backup. + instance_name (str): The name of the instance + Returns: dict: The parsed JSON content of the backup_content.control file. + Raises: + FileNotFoundError: If the backup content control file does not exist. + json.JSONDecodeError: If the backup content control file is not a valid JSON. 
+ """ + content_control_path = f'{self.backup_dir.path}/backups/{instance_name}/{backup_id}/backup_content.control' - if not env: - env=self.test_env + if not os.path.exists(content_control_path): + raise FileNotFoundError(f"Backup content control file '{content_control_path}' does not exist.") try: - self.cmd = [' '.join(map(str, [binary_path] + command))] - if self.verbose: - print(self.cmd) - if gdb: - return GDBobj([binary_path] + command, self) - if asynchronous: - return subprocess.Popen( - [binary_path] + command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env - ) - else: - self.output = subprocess.check_output( - [binary_path] + command, - stderr=subprocess.STDOUT, - env=env - ).decode('utf-8') - if command[0] == 'backup' and return_id: - # return backup ID - for line in self.output.splitlines(): - if 'INFO: Backup' and 'completed' in line: - return line.split()[2] - else: - return self.output - except subprocess.CalledProcessError as e: - raise ProbackupException(e.output.decode('utf-8').replace("\r",""), - self.cmd) - - def run_binary(self, command, asynchronous=False, env=None): - - if not env: - env = self.test_env - - if self.verbose: - print([' '.join(map(str, command))]) - try: - if asynchronous: - return subprocess.Popen( - command, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env - ) - else: - self.output = subprocess.check_output( - command, - stderr=subprocess.STDOUT, - env=env - ).decode('utf-8') - return self.output - except subprocess.CalledProcessError as e: - raise ProbackupException(e.output.decode('utf-8'), command) - - def init_pb(self, backup_dir, options=[], old_binary=False): - - shutil.rmtree(backup_dir, ignore_errors=True) - - # don`t forget to kill old_binary after remote ssh release - if self.remote and not old_binary: - options = options + [ - '--remote-proto=ssh', - '--remote-host=localhost'] - - return self.run_pb([ - 'init', - '-B', backup_dir - ] + options, - old_binary=old_binary - ) - - def add_instance(self, backup_dir, instance, node, old_binary=False, options=[]): - - cmd = [ - 'add-instance', - '--instance={0}'.format(instance), - '-B', backup_dir, - '-D', node.data_dir - ] - - # don`t forget to kill old_binary after remote ssh release - if self.remote and not old_binary: - options = options + [ - '--remote-proto=ssh', - '--remote-host=localhost'] - - return self.run_pb(cmd + options, old_binary=old_binary) - - def set_config(self, backup_dir, instance, old_binary=False, options=[]): - - cmd = [ - 'set-config', - '--instance={0}'.format(instance), - '-B', backup_dir, - ] - - return self.run_pb(cmd + options, old_binary=old_binary) - - def set_backup(self, backup_dir, instance, backup_id=False, - old_binary=False, options=[]): - - cmd = [ - 'set-backup', - '-B', backup_dir - ] - - if instance: - cmd = cmd + ['--instance={0}'.format(instance)] - - if backup_id: - cmd = cmd + ['-i', backup_id] - - return self.run_pb(cmd + options, old_binary=old_binary) - - def del_instance(self, backup_dir, instance, old_binary=False): - - return self.run_pb([ - 'del-instance', - '--instance={0}'.format(instance), - '-B', backup_dir - ], - old_binary=old_binary - ) + with open(content_control_path) as file: + lines = file.readlines() + content_control_json = [] + for line in lines: + content_control_json.append(json.loads(line)) + return content_control_json + except json.JSONDecodeError as e: + raise json.JSONDecodeError(f"Failed to parse JSON in backup content control file '{content_control_path}'", + e.doc, 
e.pos) + + def run_pb(self, backup_dir, command, gdb=False, old_binary=False, return_id=True, env=None, + skip_log_directory=False, expect_error=False): + return self.pb.run(command, gdb, old_binary, return_id, env, skip_log_directory, expect_error, use_backup_dir=backup_dir) def clean_pb(self, backup_dir): - shutil.rmtree(backup_dir, ignore_errors=True) - - def backup_node( - self, backup_dir, instance, node, data_dir=False, - backup_type='full', datname=False, options=[], - asynchronous=False, gdb=False, - old_binary=False, return_id=True, no_remote=False, - env=None - ): - if not node and not data_dir: - print('You must provide ether node or data_dir for backup') - exit(1) - - if not datname: - datname = 'postgres' - - cmd_list = [ - 'backup', - '-B', backup_dir, - '--instance={0}'.format(instance), - # "-D", pgdata, - '-p', '%i' % node.port, - '-d', datname - ] - - if data_dir: - cmd_list += ['-D', data_dir] - - # don`t forget to kill old_binary after remote ssh release - if self.remote and not old_binary and not no_remote: - options = options + [ - '--remote-proto=ssh', - '--remote-host=localhost'] - - if backup_type: - cmd_list += ['-b', backup_type] - - if not old_binary: - cmd_list += ['--no-sync'] - - return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id, env=env) - - def checkdb_node( - self, backup_dir=False, instance=False, data_dir=False, - options=[], asynchronous=False, gdb=False, old_binary=False - ): - - cmd_list = ["checkdb"] - - if backup_dir: - cmd_list += ["-B", backup_dir] - - if instance: - cmd_list += ["--instance={0}".format(instance)] - - if data_dir: - cmd_list += ["-D", data_dir] - - return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary) - - def merge_backup( - self, backup_dir, instance, backup_id, asynchronous=False, - gdb=False, old_binary=False, options=[]): - cmd_list = [ - 'merge', - '-B', backup_dir, - '--instance={0}'.format(instance), - '-i', backup_id - ] - - return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary) - - def restore_node( - self, backup_dir, instance, node=False, - data_dir=None, backup_id=None, old_binary=False, options=[], - gdb=False - ): - - if data_dir is None: - data_dir = node.data_dir - - cmd_list = [ - 'restore', - '-B', backup_dir, - '-D', data_dir, - '--instance={0}'.format(instance) - ] - - # don`t forget to kill old_binary after remote ssh release - if self.remote and not old_binary: - options = options + [ - '--remote-proto=ssh', - '--remote-host=localhost'] - - if backup_id: - cmd_list += ['-i', backup_id] - - if not old_binary: - cmd_list += ['--no-sync'] - - return self.run_pb(cmd_list + options, gdb=gdb, old_binary=old_binary) - - def catchup_node( - self, - backup_mode, source_pgdata, destination_node, - options = [] - ): - - cmd_list = [ - 'catchup', - '--backup-mode={0}'.format(backup_mode), - '--source-pgdata={0}'.format(source_pgdata), - '--destination-pgdata={0}'.format(destination_node.data_dir) - ] - if self.remote: - cmd_list += ['--remote-proto=ssh', '--remote-host=localhost'] - if self.verbose: - cmd_list += [ - '--log-level-file=VERBOSE', - '--log-directory={0}'.format(destination_node.logs_dir) - ] - - return self.run_pb(cmd_list + options) - - def show_pb( - self, backup_dir, instance=None, backup_id=None, - options=[], as_text=False, as_json=True, old_binary=False, - env=None - ): - - backup_list = [] - specific_record = {} - cmd_list = [ - 'show', - '-B', backup_dir, - ] - if instance: - cmd_list += ['--instance={0}'.format(instance)] - - if 
backup_id: - cmd_list += ['-i', backup_id] - - # AHTUNG, WARNING will break json parsing - if as_json: - cmd_list += ['--format=json', '--log-level-console=error'] - - if as_text: - # You should print it when calling as_text=true - return self.run_pb(cmd_list + options, old_binary=old_binary, env=env) - - # get show result as list of lines - if as_json: - data = json.loads(self.run_pb(cmd_list + options, old_binary=old_binary)) - # print(data) - for instance_data in data: - # find specific instance if requested - if instance and instance_data['instance'] != instance: - continue - - for backup in reversed(instance_data['backups']): - # find specific backup if requested - if backup_id: - if backup['id'] == backup_id: - return backup - else: - backup_list.append(backup) - - if backup_id is not None: - self.assertTrue(False, "Failed to find backup with ID: {0}".format(backup_id)) - - return backup_list - else: - show_splitted = self.run_pb( - cmd_list + options, old_binary=old_binary, env=env).splitlines() - if instance is not None and backup_id is None: - # cut header(ID, Mode, etc) from show as single string - header = show_splitted[1:2][0] - # cut backup records from show as single list - # with string for every backup record - body = show_splitted[3:] - # inverse list so oldest record come first - body = body[::-1] - # split string in list with string for every header element - header_split = re.split(' +', header) - # Remove empty items - for i in header_split: - if i == '': - header_split.remove(i) - continue - header_split = [ - header_element.rstrip() for header_element in header_split - ] - for backup_record in body: - backup_record = backup_record.rstrip() - # split list with str for every backup record element - backup_record_split = re.split(' +', backup_record) - # Remove empty items - for i in backup_record_split: - if i == '': - backup_record_split.remove(i) - if len(header_split) != len(backup_record_split): - print(warning.format( - header=header, body=body, - header_split=header_split, - body_split=backup_record_split) - ) - exit(1) - new_dict = dict(zip(header_split, backup_record_split)) - backup_list.append(new_dict) - return backup_list - else: - # cut out empty lines and lines started with # - # and other garbage then reconstruct it as dictionary - # print show_splitted - sanitized_show = [item for item in show_splitted if item] - sanitized_show = [ - item for item in sanitized_show if not item.startswith('#') - ] - # print sanitized_show - for line in sanitized_show: - name, var = line.partition(' = ')[::2] - var = var.strip('"') - var = var.strip("'") - specific_record[name.strip()] = var - - if not specific_record: - self.assertTrue(False, "Failed to find backup with ID: {0}".format(backup_id)) - - return specific_record - - def show_archive( - self, backup_dir, instance=None, options=[], - as_text=False, as_json=True, old_binary=False, - tli=0 - ): - - cmd_list = [ - 'show', - '--archive', - '-B', backup_dir, - ] - if instance: - cmd_list += ['--instance={0}'.format(instance)] - - # AHTUNG, WARNING will break json parsing - if as_json: - cmd_list += ['--format=json', '--log-level-console=error'] - - if as_text: - # You should print it when calling as_text=true - return self.run_pb(cmd_list + options, old_binary=old_binary) - - if as_json: - if as_text: - data = self.run_pb(cmd_list + options, old_binary=old_binary) - else: - data = json.loads(self.run_pb(cmd_list + options, old_binary=old_binary)) - - if instance: - instance_timelines = None - for instance_name in 
data: - if instance_name['instance'] == instance: - instance_timelines = instance_name['timelines'] - break - - if tli > 0: - timeline_data = None - for timeline in instance_timelines: - if timeline['tli'] == tli: - return timeline - - return {} - - if instance_timelines: - return instance_timelines - - return data - else: - show_splitted = self.run_pb( - cmd_list + options, old_binary=old_binary).splitlines() - print(show_splitted) - exit(1) - - def validate_pb( - self, backup_dir, instance=None, backup_id=None, - options=[], old_binary=False, gdb=False, asynchronous=False - ): - - cmd_list = [ - 'validate', - '-B', backup_dir - ] - if instance: - cmd_list += ['--instance={0}'.format(instance)] - if backup_id: - cmd_list += ['-i', backup_id] - - return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb, asynchronous=asynchronous) - - def delete_pb( - self, backup_dir, instance, backup_id=None, - options=[], old_binary=False, gdb=False, asynchronous=False): - cmd_list = [ - 'delete', - '-B', backup_dir - ] - - cmd_list += ['--instance={0}'.format(instance)] - if backup_id: - cmd_list += ['-i', backup_id] - - return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb, asynchronous=asynchronous) - - def delete_expired( - self, backup_dir, instance, options=[], old_binary=False): - cmd_list = [ - 'delete', - '-B', backup_dir, - '--instance={0}'.format(instance) - ] - return self.run_pb(cmd_list + options, old_binary=old_binary) - - def show_config(self, backup_dir, instance, old_binary=False): - out_dict = {} - cmd_list = [ - 'show-config', - '-B', backup_dir, - '--instance={0}'.format(instance) - ] - - res = self.run_pb(cmd_list, old_binary=old_binary).splitlines() - for line in res: - if not line.startswith('#'): - name, var = line.partition(' = ')[::2] - out_dict[name] = var - return out_dict + fs_backup_class(backup_dir).cleanup() def get_recovery_conf(self, node): out_dict = {} - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf_path = os.path.join(node.data_dir, 'postgresql.auto.conf') with open(recovery_conf_path, 'r') as f: print(f.read()) @@ -1430,7 +793,7 @@ def get_recovery_conf(self, node): recovery_conf_path = os.path.join(node.data_dir, 'recovery.conf') with open( - recovery_conf_path, 'r' + recovery_conf_path, 'r' ) as recovery_conf: for line in recovery_conf: try: @@ -1440,157 +803,41 @@ def get_recovery_conf(self, node): out_dict[key.strip()] = value.strip(" '").replace("'\n", "") return out_dict - def set_archiving( - self, backup_dir, instance, node, replica=False, - overwrite=False, compress=True, old_binary=False, - log_level=False, archive_timeout=False, - custom_archive_command=None): - - # parse postgresql.auto.conf - options = {} - if replica: - options['archive_mode'] = 'always' - options['hot_standby'] = 'on' - else: - options['archive_mode'] = 'on' - - if custom_archive_command is None: - if os.name == 'posix': - options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format( - self.probackup_path, backup_dir, instance) - - elif os.name == 'nt': - options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format( - self.probackup_path.replace("\\","\\\\"), - backup_dir.replace("\\","\\\\"), instance) - - # don`t forget to kill old_binary after remote ssh release - if self.remote and not old_binary: - options['archive_command'] += '--remote-proto=ssh ' - options['archive_command'] += '--remote-host=localhost ' - - if 
self.archive_compress and compress: - options['archive_command'] += '--compress ' - - if overwrite: - options['archive_command'] += '--overwrite ' - - options['archive_command'] += '--log-level-console=VERBOSE ' - options['archive_command'] += '-j 5 ' - options['archive_command'] += '--batch-size 10 ' - options['archive_command'] += '--no-sync ' - - if archive_timeout: - options['archive_command'] += '--archive-timeout={0} '.format( - archive_timeout) - - if os.name == 'posix': - options['archive_command'] += '--wal-file-path=%p --wal-file-name=%f' - - elif os.name == 'nt': - options['archive_command'] += '--wal-file-path="%p" --wal-file-name="%f"' - - if log_level: - options['archive_command'] += ' --log-level-console={0}'.format(log_level) - options['archive_command'] += ' --log-level-file={0} '.format(log_level) - else: # custom_archive_command is not None - options['archive_command'] = custom_archive_command - - self.set_auto_conf(node, options) - - def get_restore_command(self, backup_dir, instance, node): + def get_restore_command(self, backup_dir, instance): # parse postgresql.auto.conf - restore_command = '' - if os.name == 'posix': - restore_command += '{0} archive-get -B {1} --instance={2} '.format( - self.probackup_path, backup_dir, instance) - - elif os.name == 'nt': - restore_command += '"{0}" archive-get -B {1} --instance={2} '.format( - self.probackup_path.replace("\\","\\\\"), - backup_dir.replace("\\","\\\\"), instance) + restore_command = " ".join([f'"{self.probackup_path}"', + 'archive-get', *backup_dir.pb_args]) + if os.name == 'nt': + restore_command.replace("\\", "\\\\") + restore_command += f' --instance={instance}' # don`t forget to kill old_binary after remote ssh release if self.remote: - restore_command += '--remote-proto=ssh ' - restore_command += '--remote-host=localhost ' + restore_command += ' --remote-proto=ssh' + restore_command += ' --remote-host=localhost' if os.name == 'posix': - restore_command += '--wal-file-path=%p --wal-file-name=%f' + restore_command += ' --wal-file-path=%p --wal-file-name=%f' elif os.name == 'nt': - restore_command += '--wal-file-path="%p" --wal-file-name="%f"' + restore_command += ' --wal-file-path="%p" --wal-file-name="%f"' return restore_command - # rm_options - list of parameter name that should be deleted from current config, - # example: ['wal_keep_segments', 'max_wal_size'] - def set_auto_conf(self, node, options, config='postgresql.auto.conf', rm_options={}): - - # parse postgresql.auto.conf - path = os.path.join(node.data_dir, config) - - with open(path, 'r') as f: - raw_content = f.read() - - current_options = {} - current_directives = [] - for line in raw_content.splitlines(): - - # ignore comments - if line.startswith('#'): - continue - - if line == '': - continue - - if line.startswith('include'): - current_directives.append(line) - continue - - name, var = line.partition('=')[::2] - name = name.strip() - var = var.strip() - var = var.strip('"') - var = var.strip("'") - - # remove options specified in rm_options list - if name in rm_options: - continue - - current_options[name] = var - - for option in options: - current_options[option] = options[option] - - auto_conf = '' - for option in current_options: - auto_conf += "{0} = '{1}'\n".format( - option, current_options[option]) - - for directive in current_directives: - auto_conf += directive + "\n" - - with open(path, 'wt') as f: - f.write(auto_conf) - f.flush() - f.close() - def set_replica( self, master, replica, replica_name='replica', synchronous=False, 
log_shipping=False - ): + ): - self.set_auto_conf( - replica, + replica.set_auto_conf( options={ 'port': replica.port, 'hot_standby': 'on'}) - if self.get_version(replica) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): with open(os.path.join(replica.data_dir, "standby.signal"), 'w') as f: f.flush() f.close() @@ -1598,11 +845,10 @@ def set_replica( config = 'postgresql.auto.conf' if not log_shipping: - self.set_auto_conf( - replica, + replica.set_auto_conf( {'primary_conninfo': 'user={0} port={1} application_name={2} ' - ' sslmode=prefer sslcompression=1'.format( - self.user, master.port, replica_name)}, + ' sslmode=prefer sslcompression=1'.format( + self.username, master.port, replica_name)}, config) else: replica.append_conf('recovery.conf', 'standby_mode = on') @@ -1612,57 +858,61 @@ def set_replica( 'recovery.conf', "primary_conninfo = 'user={0} port={1} application_name={2}" " sslmode=prefer sslcompression=1'".format( - self.user, master.port, replica_name)) + self.username, master.port, replica_name)) if synchronous: - self.set_auto_conf( - master, + master.set_auto_conf( options={ 'synchronous_standby_names': replica_name, 'synchronous_commit': 'remote_apply'}) master.reload() - def change_backup_status(self, backup_dir, instance, backup_id, status): + class ControlFileContainer(object): + __slots__ = ('data',) + + def __init__(self, data): + self.data = data - control_file_path = os.path.join( - backup_dir, 'backups', instance, backup_id, 'backup.control') - - with open(control_file_path, 'r') as f: - actual_control = f.read() - - new_control_file = '' - for line in actual_control.splitlines(): - if line.startswith('status'): - line = 'status = {0}'.format(status) - new_control_file += line - new_control_file += '\n' - - with open(control_file_path, 'wt') as f: - f.write(new_control_file) - f.flush() - f.close() - - with open(control_file_path, 'r') as f: - actual_control = f.read() - - def wrong_wal_clean(self, node, wal_size): - wals_dir = os.path.join(self.backup_dir(node), 'wal') - wals = [ - f for f in os.listdir(wals_dir) if os.path.isfile( - os.path.join(wals_dir, f)) - ] - wals.sort() - file_path = os.path.join(wals_dir, wals[-1]) - if os.path.getsize(file_path) != wal_size: - os.remove(file_path) + @contextlib.contextmanager + def modify_backup_control(self, backup_dir, instance, backup_id): + path = os.path.join('backups', instance, backup_id, 'backup.control') + control_file = backup_dir.read_file(path) + cf = ProbackupTest.ControlFileContainer(control_file) + yield cf + if control_file != cf.data: + backup_dir.write_file(path, cf.data) + + def change_backup_status(self, backup_dir, instance, backup_id, status): + with self.modify_backup_control(backup_dir, instance, backup_id) as cf: + cf.data = re.sub(r'status = \w+', f'status = {status}', cf.data, 1) + + def get_locks(self, backup_dir : TestBackupDir, node : str): + path = "backups/" + node + "/locks" + return backup_dir.list_files(path) + + def read_lock(self, backup_dir : TestBackupDir, node : str, lock : str): + path = "backups/" + node + "/locks/" + lock + return backup_dir.read_file(path, text=False) + + def expire_locks(self, backup_dir : TestBackupDir, node : str, seconds=1): + path = "backups/" + node + "/locks" + now = time.time() + expired = base36enc(int(now) - seconds) + for lock in backup_dir.list_files(path): + base, ts, exclusive = lock.rsplit("_", 2) + lock_expired = "_".join([base, expired, exclusive]) + content = backup_dir.read_file(path+"/"+lock, text = 
False) + backup_dir.remove_file(path+"/"+lock) + backup_dir.write_file(path+"/"+lock_expired, content, text = False) def guc_wal_segment_size(self, node): var = node.execute( 'postgres', "select setting from pg_settings where name = 'wal_segment_size'" ) - return int(var[0][0]) * self.guc_wal_block_size(node) + print(int(var[0][0])) + return int(var[0][0]) def guc_wal_block_size(self, node): var = node.execute( @@ -1673,19 +923,15 @@ def guc_wal_block_size(self, node): def get_pgpro_edition(self, node): if node.execute( - 'postgres', - "select exists (select 1 from" - " pg_proc where proname = 'pgpro_edition')" + 'postgres', + "select exists (select 1 from" + " pg_proc where proname = 'pgpro_edition')" )[0][0]: var = node.execute('postgres', 'select pgpro_edition()') return str(var[0][0]) else: return False - def get_username(self): - """ Returns current user name """ - return getpass.getuser() - def version_to_num(self, version): if not version: return 0 @@ -1697,100 +943,93 @@ def version_to_num(self, version): num = num * 100 + int(re.sub(r"[^\d]", "", part)) return num - def switch_wal_segment(self, node): + def switch_wal_segment(self, node, sleep_seconds=1, and_tx=False): """ - Execute pg_switch_wal/xlog() in given node + Execute pg_switch_wal() in given node Args: node: an instance of PostgresNode or NodeConnection class """ if isinstance(node, testgres.PostgresNode): - if self.version_to_num( - node.safe_psql('postgres', 'show server_version').decode('utf-8') - ) >= self.version_to_num('10.0'): - node.safe_psql('postgres', 'select pg_switch_wal()') - else: - node.safe_psql('postgres', 'select pg_switch_xlog()') + with node.connect('postgres') as con: + if and_tx: + con.execute('select txid_current()') + lsn = con.execute('select pg_switch_wal()')[0][0] else: - if self.version_to_num( - node.execute('show server_version')[0][0] - ) >= self.version_to_num('10.0'): - node.execute('select pg_switch_wal()') - else: - node.execute('select pg_switch_xlog()') + lsn = node.execute('select pg_switch_wal()')[0][0] - sleep(1) + if sleep_seconds > 0: + time.sleep(sleep_seconds) + return lsn - def wait_until_replica_catch_with_master(self, master, replica): - - version = master.safe_psql( - 'postgres', - 'show server_version').decode('utf-8').rstrip() + @contextlib.contextmanager + def switch_wal_after(self, node, seconds, and_tx=True): + tm = threading.Timer(seconds, self.switch_wal_segment, [node, 0, and_tx]) + tm.start() + try: + yield + finally: + tm.cancel() + tm.join() - if self.version_to_num(version) >= self.version_to_num('10.0'): - master_function = 'pg_catalog.pg_current_wal_lsn()' - replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' - else: - master_function = 'pg_catalog.pg_current_xlog_location()' - replica_function = 'pg_catalog.pg_last_xlog_replay_location()' + def wait_until_replica_catch_with_master(self, master, replica): + master_function = 'pg_catalog.pg_current_wal_insert_lsn()' lsn = master.safe_psql( 'postgres', 'SELECT {0}'.format(master_function)).decode('utf-8').rstrip() # Wait until replica catch up with master + self.wait_until_lsn_replayed(replica, lsn) + return lsn + + def wait_until_lsn_replayed(self, replica, lsn): + replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' replica.poll_query_until( 'postgres', "SELECT '{0}'::pg_lsn <= {1}".format(lsn, replica_function)) - def get_version(self, node): - return self.version_to_num( - testgres.get_pg_config()['VERSION'].split(" ")[1]) - def get_ptrack_version(self, node): version = node.safe_psql( "postgres", 
"SELECT extversion " - "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'").decode('utf-8').rstrip() + "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'").decode('utf-8').rstrip() return self.version_to_num(version) def get_bin_path(self, binary): return testgres.get_bin_path(binary) - def del_test_dir(self, module_name, fname): + def del_test_dirs(self): """ Del testdir and optimistically try to del module dir""" - - shutil.rmtree( - os.path.join( - self.tmp_path, - module_name, - fname - ), - ignore_errors=True - ) + # Remove FS dependent part first + self.backup_dir.cleanup() + # Remove all the rest + if init_params.delete_logs: + shutil.rmtree(self.test_path, ignore_errors=True) def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): """ return dict with directory content. " " TAKE IT AFTER CHECKPOINT or BACKUP""" - dirs_to_ignore = [ + dirs_to_ignore = { 'pg_xlog', 'pg_wal', 'pg_log', 'pg_stat_tmp', 'pg_subtrans', 'pg_notify' - ] - files_to_ignore = [ + } + files_to_ignore = { 'postmaster.pid', 'postmaster.opts', 'pg_internal.init', 'postgresql.auto.conf', - 'backup_label', 'tablespace_map', 'recovery.conf', + 'backup_label', 'backup_label.old', + 'tablespace_map', 'recovery.conf', 'ptrack_control', 'ptrack_init', 'pg_control', 'probackup_recovery.conf', 'recovery.signal', 'standby.signal', 'ptrack.map', 'ptrack.map.mmap', - 'ptrack.map.tmp' - ] + 'ptrack.map.tmp', 'recovery.done' + } if exclude_dirs: - dirs_to_ignore = dirs_to_ignore + exclude_dirs -# suffixes_to_ignore = ( -# '_ptrack' -# ) + dirs_to_ignore |= set(exclude_dirs) + # suffixes_to_ignore = ( + # '_ptrack' + # ) directory_dict = {} directory_dict['pgdata'] = pgdata directory_dict['files'] = {} @@ -1799,10 +1038,10 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): dirs[:] = [d for d in dirs if d not in dirs_to_ignore] for file in files: if ( - file in files_to_ignore or - (ignore_ptrack and file.endswith('_ptrack')) + file in files_to_ignore or + (ignore_ptrack and file.endswith('_ptrack')) ): - continue + continue file_fullpath = os.path.join(root, file) file_relpath = os.path.relpath(file_fullpath, pgdata) @@ -1812,11 +1051,11 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): # truncate cfm's content's zero tail if file_relpath.endswith('.cfm'): content = f.read() - zero64 = b"\x00"*64 + zero64 = b"\x00" * 64 l = len(content) while l > 64: s = (l - 1) & ~63 - if content[s:l] != zero64[:l-s]: + if content[s:l] != zero64[:l - s]: break l = s content = content[:l] @@ -1824,17 +1063,17 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): else: digest = hashlib.md5() while True: - b = f.read(64*1024) + b = f.read(64 * 1024) if not b: break digest.update(b) cfile.md5 = digest.hexdigest() # crappy algorithm if cfile.is_datafile: - size_in_pages = os.path.getsize(file_fullpath)/8192 + size_in_pages = os.path.getsize(file_fullpath) / 8192 cfile.md5_per_page = self.get_md5_per_page_for_fork( - file_fullpath, size_in_pages - ) + file_fullpath, size_in_pages + ) for directory in dirs: directory_path = os.path.join(root, directory) @@ -1866,14 +1105,13 @@ def get_known_bugs_comparision_exclusion_dict(self, node): "FROM pg_am, pg_class " "WHERE pg_am.amname = 'spgist' " "AND pg_class.relam = pg_am.oid" - ).decode('utf-8').rstrip().splitlines() + ).decode('utf-8').rstrip().splitlines() for filename in spgist_filelist: comparision_exclusion_dict[filename] = set([0]) return comparision_exclusion_dict - - def compare_pgdata(self, 
original_pgdata, restored_pgdata, exclusion_dict = dict()): + def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict=dict()): """ return dict with directory content. DO IT BEFORE RECOVERY exclusion_dict is used for exclude files (and it block_no) from comparision @@ -1969,7 +1207,6 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict os.path.join(restored_pgdata['pgdata'], file) ) - for page in sorted(restored_pages - original_pages): error_message += '\n Extra page {0}\n File: {1}\n'.format( page, @@ -1992,264 +1229,188 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict restored.md5_per_page[page], os.path.join( restored_pgdata['pgdata'], file) - ) + ) self.assertFalse(fail, error_message) - def gdb_attach(self, pid): - return GDBobj([str(pid)], self, attach=True) - - def _check_gdb_flag_or_skip_test(self): - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - - -class GdbException(Exception): - def __init__(self, message="False"): - self.message = message - - def __str__(self): - return '\n ERROR: {0}\n'.format(repr(self.message)) - - -class GDBobj: - def __init__(self, cmd, env, attach=False): - self.verbose = env.verbose - self.output = '' - - # Check gdb flag is set up - if not env.gdb: - raise GdbException("No `PGPROBACKUP_GDB=on` is set, " - "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " - "and be skipped") - # Check gdb presense - try: - gdb_version, _ = subprocess.Popen( - ['gdb', '--version'], - stdout=subprocess.PIPE - ).communicate() - except OSError: - raise GdbException("Couldn't find gdb on the path") - - self.base_cmd = [ - 'gdb', - '--interpreter', - 'mi2', - ] - - if attach: - self.cmd = self.base_cmd + ['--pid'] + cmd - else: - self.cmd = self.base_cmd + ['--args'] + cmd - - # Get version - gdb_version_number = re.search( - br"^GNU gdb [^\d]*(\d+)\.(\d)", - gdb_version) - self.major_version = int(gdb_version_number.group(1)) - self.minor_version = int(gdb_version_number.group(2)) - - if self.verbose: - print([' '.join(map(str, self.cmd))]) - - self.proc = subprocess.Popen( - self.cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - bufsize=0, - text=True, - errors='replace', - ) - self.gdb_pid = self.proc.pid - - while True: - line = self.get_line() - - if 'No such process' in line: - raise GdbException(line) - - if not line.startswith('(gdb)'): - pass - else: - break - - def get_line(self): - line = self.proc.stdout.readline() - self.output += line - return line - - def kill(self): - self.proc.kill() - self.proc.wait() - - def set_breakpoint(self, location): + def compare_instance_dir(self, original_instance, after_backup_instance, exclusion_dict=dict()): + """ + exclusion_dict is used for exclude files (and it block_no) from comparision + it is a dict with relative filenames as keys and set of block numbers as values + """ + fail = False + error_message = 'Instance directory is not equal to original!\n' - result = self._execute('break ' + location) - for line in result: - if line.startswith('~"Breakpoint'): - return + # Compare directories + after_backup = set(after_backup_instance['dirs']) + original_dirs = set(original_instance['dirs']) - elif line.startswith('=breakpoint-created'): - return + for directory in sorted(after_backup - original_dirs): + fail = True + error_message += '\nDirectory was not present' + error_message += ' in original instance: 
{0}\n'.format(directory) - elif line.startswith('^error'): #or line.startswith('(gdb)'): - break + for directory in sorted(original_dirs - after_backup): + fail = True + error_message += '\nDirectory dissappeared' + error_message += ' in instance after backup: {0}\n'.format(directory) - elif line.startswith('&"break'): - pass + for directory in sorted(original_dirs & after_backup): + original = original_instance['dirs'][directory] + after_backup = after_backup_instance['dirs'][directory] + if original.mode != after_backup.mode: + fail = True + error_message += '\nDir permissions mismatch:\n' + error_message += ' Dir old: {0} Permissions: {1}\n'.format(directory, + original.mode) + error_message += ' Dir new: {0} Permissions: {1}\n'.format(directory, + after_backup.mode) - elif line.startswith('&"Function'): - raise GdbException(line) + after_backup_files = set(after_backup_instance['files']) + original_files = set(original_instance['files']) - elif line.startswith('&"No line'): - raise GdbException(line) + for file in sorted(after_backup_files - original_files): + # File is present in instance after backup + # but not present in original instance + # only backup_label is allowed + fail = True + error_message += '\nFile is not present' + error_message += ' in original instance: {0}\n'.format(file) - elif line.startswith('~"Make breakpoint pending on future shared'): - raise GdbException(line) + for file in sorted(original_files - after_backup_files): + error_message += ( + '\nFile disappearance.\n ' + 'File: {0}\n').format(file) + fail = True - raise GdbException( - 'Failed to set breakpoint.\n Output:\n {0}'.format(result) - ) + for file in sorted(original_files & after_backup_files): + original = original_instance['files'][file] + after_backup = after_backup_instance['files'][file] + if after_backup.mode != original.mode: + fail = True + error_message += '\nFile permissions mismatch:\n' + error_message += ' File_old: {0} Permissions: {1:o}\n'.format(file, + original.mode) + error_message += ' File_new: {0} Permissions: {1:o}\n'.format(file, + after_backup.mode) - def remove_all_breakpoints(self): + if original.md5 != after_backup.md5: + if file not in exclusion_dict: + fail = True + error_message += ( + '\nFile Checksum mismatch.\n' + 'File_old: {0}\nChecksum_old: {1}\n' + 'File_new: {2}\nChecksum_new: {3}\n').format(file, + original.md5, file, after_backup.md5 + ) - result = self._execute('delete') - for line in result: + if not original.is_datafile: + continue - if line.startswith('^done'): - return + original_pages = set(original.md5_per_page) + after_backup_pages = set(after_backup.md5_per_page) - raise GdbException( - 'Failed to remove breakpoints.\n Output:\n {0}'.format(result) - ) + for page in sorted(original_pages - after_backup_pages): + error_message += '\n Page {0} dissappeared.\n File: {1}\n'.format( + page, file) - def run_until_break(self): - result = self._execute('run', False) - for line in result: - if line.startswith('*stopped,reason="breakpoint-hit"'): - return - raise GdbException( - 'Failed to run until breakpoint.\n' - ) - def continue_execution_until_running(self): - result = self._execute('continue') + for page in sorted(after_backup_pages - original_pages): + error_message += '\n Extra page {0}\n File: {1}\n'.format( + page, file) - for line in result: - if line.startswith('*running') or line.startswith('^running'): - return - if line.startswith('*stopped,reason="breakpoint-hit"'): - continue - if line.startswith('*stopped,reason="exited-normally"'): - continue 
+ for page in sorted(original_pages & after_backup_pages): + if file in exclusion_dict and page in exclusion_dict[file]: + continue - raise GdbException( - 'Failed to continue execution until running.\n' - ) + if original.md5_per_page[page] != after_backup.md5_per_page[page]: + fail = True + error_message += ( + '\n Page checksum mismatch: {0}\n ' + ' PAGE Checksum_old: {1}\n ' + ' PAGE Checksum_new: {2}\n ' + ' File: {3}\n' + ).format( + page, + original.md5_per_page[page], + after_backup.md5_per_page[page], + file + ) - def continue_execution_until_exit(self): - result = self._execute('continue', False) + self.assertFalse(fail, error_message) - for line in result: - if line.startswith('*running'): - continue - if line.startswith('*stopped,reason="breakpoint-hit"'): - continue - if ( - line.startswith('*stopped,reason="exited') or - line == '*stopped\n' - ): - return - raise GdbException( - 'Failed to continue execution until exit.\n' - ) + def gdb_attach(self, pid): + return GDBobj([str(pid)], self, attach=True) - def continue_execution_until_error(self): - result = self._execute('continue', False) + def assertMessage(self, actual=None, *, contains=None, regex=None, has_no=None): + if actual is None: + actual = self.output + if self.output and self.output != actual: # Don't want to see this twice + error_message = '\n Unexpected Error Message: `{0}`\n CMD: `{1}`'.format(repr(self.output), + self.cmd) + else: + error_message = '\n Unexpected Error Message. CMD: `{0}`'.format(self.cmd) + if contains: + self.assertIn(contains, actual, error_message) + elif regex: + self.assertRegex(actual, regex, error_message) + elif has_no: + self.assertNotIn(has_no, actual, error_message) - for line in result: - if line.startswith('^error'): - return - if line.startswith('*stopped,reason="exited'): - return - if line.startswith( - '*stopped,reason="signal-received",signal-name="SIGABRT"'): - return +def get_relative_path(run_path, data_dir): + run_path_parts = run_path.split('/') + data_dir_parts = data_dir.split('/') - raise GdbException( - 'Failed to continue execution until error.\n') + # Find index of the first different element in the lists + diff_index = 0 + for i in range(min(len(run_path_parts), len(data_dir_parts))): + if run_path_parts[i] != data_dir_parts[i]: + diff_index = i + break - def continue_execution_until_break(self, ignore_count=0): - if ignore_count > 0: - result = self._execute( - 'continue ' + str(ignore_count), - False - ) - else: - result = self._execute('continue', False) + # Build relative path + relative_path = ['..'] * (len(run_path_parts) - diff_index) + data_dir_parts[diff_index:] - for line in result: - if line.startswith('*stopped,reason="breakpoint-hit"'): - return - if line.startswith('*stopped,reason="exited-normally"'): - break + return '/'.join(relative_path) - raise GdbException( - 'Failed to continue execution until break.\n') - - def stopped_in_breakpoint(self): - while True: - line = self.get_line() - if self.verbose: - print(line) - if line.startswith('*stopped,reason="breakpoint-hit"'): - return True - return False - - def quit(self): - self.proc.terminate() - - # use for breakpoint, run, continue - def _execute(self, cmd, running=True): - output = [] - self.proc.stdin.flush() - self.proc.stdin.write(cmd + '\n') - self.proc.stdin.flush() - sleep(1) - - # look for command we just send - while True: - line = self.get_line() - if self.verbose: - print(repr(line)) - - if cmd not in line: - continue - else: - break - while True: - line = self.get_line() - output += 
[line] - if self.verbose: - print(repr(line)) - if line.startswith('^done') or line.startswith('*stopped'): - break - if line.startswith('^error'): - break - if running and (line.startswith('*running') or line.startswith('^running')): -# if running and line.startswith('*running'): - break - return output class ContentFile(object): __slots__ = ('is_datafile', 'mode', 'md5', 'md5_per_page') + def __init__(self, is_datafile: bool): self.is_datafile = is_datafile + class ContentDir(object): - __slots__ = ('mode') \ No newline at end of file + __slots__ = ('mode') + +def _lz4_decompress(data): + with lz4.frame.open(io.BytesIO(data), 'rb') as fl: + return fl.read() + +def _lz4_compress(data): + out = io.BytesIO() + with lz4.frame.open(out, 'wb', content_checksum=True) as fl: + fl.write(data) + return out.getvalue() + +def _do_compress(file, data): + if file.endswith('.gz'): + return gzip.compress(data, compresslevel=1) + elif file.endswith('.lz4'): + return _lz4_compress(data) + elif file.endswith('.zst'): + return zstd.compress(data, 1, 1) + else: + return data + +def _do_decompress(file, data): + if file.endswith('.gz'): + return gzip.decompress(data) + elif file.endswith('.lz4'): + return _lz4_decompress(data) + elif file.endswith('.zst'): + return zstd.decompress(data) + else: + return data diff --git a/tests/helpers/state_helper.py b/tests/helpers/state_helper.py new file mode 100644 index 000000000..12552931a --- /dev/null +++ b/tests/helpers/state_helper.py @@ -0,0 +1,25 @@ +import re +from os import path + + +def get_program_version() -> str: + """ + Get pg_probackup version from source file /src/pg_probackup.h + value of PROGRAM_VERSION + The alternative for file /tests/expected/option_version.out + """ + probackup_h_path = '../../src/pg_probackup.h' + probackup_h_full_path = path.join(path.dirname(__file__), probackup_h_path) + define_sub = "#define PROGRAM_VERSION" + try: + with open(probackup_h_full_path, 'r') as probackup_h: + for line in probackup_h: + clean_line = re.sub(' +', ' ', line) # Line without doubled spaces + if define_sub in clean_line: + version = re.findall(r'"([^""]+)"', clean_line)[0] # Get the value between two quotes + return str(version) + raise Exception(f"Couldn't find the line with `{define_sub}` in file `{probackup_h_full_path}` " + f"that contains version between 2 quotes") + except FileNotFoundError: + raise FileNotFoundError( + f"Couldn't get version, check that file `{probackup_h_full_path}` exists and `PROGRAM_VERSION` defined") \ No newline at end of file diff --git a/tests/helpers/validators/show_validator.py b/tests/helpers/validators/show_validator.py new file mode 100644 index 000000000..d7df177a8 --- /dev/null +++ b/tests/helpers/validators/show_validator.py @@ -0,0 +1,141 @@ +import json +from datetime import datetime +from unittest import TestCase + +from ..enums.date_time_enum import DateTimePattern + + +class ShowJsonResultValidator(TestCase): + """ + This class contains all fields from show command result in json format. + It used for more convenient way to set up and validate output results. + + If we want to check the field we should set up it using the appropriate set method + For ex: + my_validator = ShowJsonResultValidator().set_backup_mode("PAGE")\ + .set_status("OK") + + After that we can compare json result from self.pb.show command with this class using `check_show_json` method. + + For informative error output, the validator class is inherited from TestClass. 
It allows us to use assertEqual + and do not worry about the readability of the error result. + """ + + def __init__(self): + super().__init__() + self.backup_id = None + self.parent_backup_id = None + self.backup_mode = None + self.wal = None + self.compress_alg = None + self.compress_level = None + self.from_replica = None + self.block_size = None + self.xlog_block_size = None + self.checksum_version = None + self.program_version = None + self.server_version = None + self.current_tli = None + self.parent_tli = None + self.start_lsn = None + self.stop_lsn = None + self.start_time = None + self.end_time = None + self.end_validation_time = None + self.recovery_xid = None + self.recovery_time = None + self.data_bytes = None + self.wal_bytes = None + self.uncompressed_bytes = None + self.pgdata_bytes = None + self.primary_conninfo = None + self.status = None + self.content_crc = None + + def check_show_json(self, show_result: json): + # Check equality if the value was set + if self.backup_id: + self.assertEqual(show_result["id"], self.backup_id) + if self.parent_backup_id: + self.assertEqual(show_result["parent-backup-id"], self.parent_backup_id) + if self.backup_mode: + self.assertEqual(show_result["backup-mode"], self.backup_mode) + if self.wal: + self.assertEqual(show_result["wal"], self.wal) + if self.compress_alg: + self.assertEqual(show_result["compress-alg"], self.compress_alg) + if self.compress_level: + self.assertEqual(show_result["compress-level"], self.compress_level) + if self.from_replica: + self.assertEqual(show_result["from-replica"], self.from_replica) + if self.block_size: + self.assertEqual(show_result["block-size"], self.block_size) + if self.xlog_block_size: + self.assertEqual(show_result["xlog-block-size"], self.xlog_block_size) + if self.checksum_version: + self.assertEqual(show_result["checksum-version"], self.checksum_version) + if self.program_version: + self.assertEqual(show_result["program-version"], self.program_version) + if self.server_version: + self.assertEqual(int(show_result["server-version"]), int(self.server_version)) + if self.current_tli: + self.assertEqual(show_result["current-tli"], self.current_tli) + if self.parent_tli: + self.assertEqual(show_result["parent-tli"], self.parent_tli) + if self.start_lsn: + self.assertEqual(show_result["start-lsn"], self.start_lsn) + if self.stop_lsn: + self.assertEqual(show_result["stop-lsn"], self.stop_lsn) + if self.start_time: + self.assertEqual(show_result["start-time"], self.start_time) + if self.end_time: + self.assertEqual(show_result["end-time"], self.end_time) + if self.end_validation_time: + self.assertEqual(show_result["end-validation-time"], self.end_validation_time) + if self.recovery_xid: + self.assertEqual(show_result["recovery-xid"], self.recovery_xid) + if self.recovery_time: + self.assertEqual(show_result["recovery-time"], self.recovery_time) + if self.data_bytes: + self.assertEqual(show_result["data-bytes"], self.data_bytes) + if self.wal_bytes: + self.assertEqual(show_result["wal-bytes"], self.wal_bytes) + if self.uncompressed_bytes: + self.assertEqual(show_result["uncompressed-bytes"], self.uncompressed_bytes) + if self.pgdata_bytes: + self.assertEqual(show_result["pgdata-bytes"], self.pgdata_bytes) + if self.primary_conninfo: + self.assertEqual(show_result["primary-conninfo"], self.primary_conninfo) + if self.status: + self.assertEqual(show_result["status"], self.status) + if self.content_crc: + self.assertEqual(show_result["content-crc"], self.content_crc) + + # Sanity checks + + start_time = 
self.str_time_to_datetime(show_result["start-time"]) + end_time = self.str_time_to_datetime(show_result["end-time"]) + end_validation_time = self.str_time_to_datetime(show_result["end-validation-time"]) + self.assertLessEqual(start_time, end_time) + self.assertLessEqual(end_time, end_validation_time) + + recovery_time = datetime.strptime(show_result["recovery-time"] + '00', DateTimePattern.Y_m_d_H_M_S_f_z_dash.value) + self.assertLessEqual(start_time, recovery_time) + + data_bytes = show_result["data-bytes"] + self.assertTrue(data_bytes > 0) + + wal_bytes = show_result["wal-bytes"] + self.assertTrue(wal_bytes > 0) + + pgdata_bytes = show_result["pgdata-bytes"] + self.assertTrue(pgdata_bytes > 0) + + @staticmethod + def str_time_to_datetime(time: str): + """ + Convert string time from pg_probackup to datetime format + String '00' was added because '%z' works with 4 digits values (like +0100), but from pg_probackup we get only + 2 digits timezone value (like +01). Because of that we should add additional '00' in the end + """ + return datetime.strptime(time + '00', str(DateTimePattern.Y_m_d_H_M_S_z_dash.value)) diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py index f17ee95d1..08bc7b9ad 100644 --- a/tests/incr_restore_test.py +++ b/tests/incr_restore_test.py @@ -1,35 +1,29 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb import subprocess -from datetime import datetime -import sys -from time import sleep -from datetime import datetime, timedelta -import hashlib import shutil import json -from testgres import QueryException +from testgres import QueryException, StartNodeException -class IncrRestoreTest(ProbackupTest, unittest.TestCase): +class IncrRestoreTest(ProbackupTest): # @unittest.skip("skip") def test_basic_incr_restore(self): """incremental restore in CHECKSUM mode""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=50) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -37,7 +31,7 @@ def test_basic_incr_restore(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -45,7 +39,7 @@ def test_basic_incr_restore(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -53,7 +47,7 @@ def test_basic_incr_restore(self): pgbench.wait() pgbench.stdout.close() - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) @@ -65,8 +59,7 @@ def 
test_basic_incr_restore(self): node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -75,19 +68,17 @@ def test_basic_incr_restore(self): # @unittest.skip("skip") def test_basic_incr_restore_into_missing_directory(self): """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=10) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -95,7 +86,7 @@ def test_basic_incr_restore_into_missing_directory(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -103,14 +94,13 @@ def test_basic_incr_restore_into_missing_directory(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -120,19 +110,17 @@ def test_basic_incr_restore_into_missing_directory(self): def test_checksum_corruption_detection(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=10) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -140,7 +128,7 @@ def test_checksum_corruption_detection(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -148,7 +136,7 @@ def test_checksum_corruption_detection(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -156,14 +144,13 @@ def test_checksum_corruption_detection(self): pgbench.wait() pgbench.stdout.close() - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, 
backup_type='page') pgdata = self.pgdata_content(node.data_dir) node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=lsn"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -173,34 +160,31 @@ def test_checksum_corruption_detection(self): def test_incr_restore_with_tablespace(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) tblspace = self.get_tblspace_path(node, 'tblspace') some_directory = self.get_tblspace_path(node, 'some_directory') # stuff new destination with garbage - self.restore_node(backup_dir, 'node', node, data_dir=some_directory) + self.pb.restore_node('node', restore_dir=some_directory) self.create_tblspace_in_node(node, 'tblspace') node.pgbench_init(scale=10, tablespace='tblspace') - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ "-j", "4", "--incremental-mode=checksum", "--force", "-T{0}={1}".format(tblspace, some_directory)]) @@ -211,27 +195,25 @@ def test_incr_restore_with_tablespace(self): # @unittest.skip("skip") def test_incr_restore_with_tablespace_1(self): """recovery to target timeline""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) tblspace = self.get_tblspace_path(node, 'tblspace') some_directory = self.get_tblspace_path(node, 'some_directory') - self.restore_node(backup_dir, 'node', node, data_dir=some_directory) + self.pb.restore_node('node', restore_dir=some_directory) self.create_tblspace_in_node(node, 'tblspace') node.pgbench_init(scale=10, tablespace='tblspace') - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -239,8 +221,7 @@ def test_incr_restore_with_tablespace_1(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -248,15 +229,13 @@ def test_incr_restore_with_tablespace_1(self): pgbench.wait() pgbench.stdout.close() - self.backup_node( - backup_dir, 'node', node, 
backup_type='delta', options=['--stream']) + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -268,25 +247,20 @@ def test_incr_restore_with_tablespace_2(self): If "--tablespace-mapping" option is used with incremental restore, then new directory must be empty. """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1 = self.pg_node.make_simple('node_1') # fill node1 with data - out = self.restore_node( - backup_dir, 'node', node, - data_dir=node_1.data_dir, + out = self.pb.restore_node('node', node_1, options=['--incremental-mode=checksum', '--force']) self.assertIn("WARNING: Backup catalog was initialized for system id", out) @@ -299,31 +273,17 @@ def test_incr_restore_with_tablespace_2(self): 'postgres', 'vacuum') - self.backup_node(backup_dir, 'node', node, backup_type='delta', options=['--stream']) + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - try: - self.restore_node( - backup_dir, 'node', node, - data_dir=node_1.data_dir, - options=['--incremental-mode=checksum', '-T{0}={1}'.format(tblspace, tblspace)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because remapped directory is not empty.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Remapped tablespace destination is not empty', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_1, + options=['--incremental-mode=checksum', + '-T{0}={1}'.format(tblspace, tblspace)], + expect_error="because remapped directory is not empty") + self.assertMessage(contains='ERROR: Remapped tablespace destination is not empty') - out = self.restore_node( - backup_dir, 'node', node, - data_dir=node_1.data_dir, + out = self.pb.restore_node('node', node_1, options=[ '--force', '--incremental-mode=checksum', '-T{0}={1}'.format(tblspace, tblspace)]) @@ -335,21 +295,19 @@ def test_incr_restore_with_tablespace_2(self): def test_incr_restore_with_tablespace_3(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() 
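The hunks above repeatedly swap the old try/except ProbackupException boilerplate for expect_error= plus self.assertMessage(contains=...). The real helpers live in the pg_probackup2 test framework; the sketch below is only an assumed illustration of the shape of such a check, and the last_output attribute is invented for the example:

import unittest

class MessageAssertionSketch(unittest.TestCase):
    # Invented stand-in: the command wrapper would store the captured output here.
    last_output = ""

    def assertMessage(self, contains):
        # Fail with the full captured output so mismatches are easy to read.
        self.assertIn(contains, self.last_output,
                      f"expected {contains!r} in command output:\n{self.last_output}")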
self.create_tblspace_in_node(node, 'tblspace1') node.pgbench_init(scale=10, tablespace='tblspace1') # take backup with tblspace1 - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) self.drop_tblspace(node, 'tblspace1') @@ -359,8 +317,7 @@ def test_incr_restore_with_tablespace_3(self): node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ "-j", "4", "--incremental-mode=checksum"]) @@ -373,63 +330,40 @@ def test_incr_restore_with_tablespace_4(self): """ Check that system ID mismatch is detected, """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'tblspace1') node.pgbench_init(scale=10, tablespace='tblspace1') # take backup of node1 with tblspace1 - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) self.drop_tblspace(node, 'tblspace1') node.cleanup() # recreate node - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) node.slow_start() self.create_tblspace_in_node(node, 'tblspace1') node.pgbench_init(scale=10, tablespace='tblspace1') node.stop() - try: - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because destination directory has wrong system id.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup catalog was initialized for system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Incremental restore is not allowed', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + options=["-j", "4", "--incremental-mode=checksum"], + expect_error="because destination directory has wrong system id") + self.assertMessage(contains='WARNING: Backup catalog was initialized for system id') + self.assertMessage(contains='ERROR: Incremental restore is not allowed') - out = self.restore_node( - backup_dir, 'node', node, + out = self.pb.restore_node('node', node, options=[ "-j", "4", "--force", "--incremental-mode=checksum"]) @@ -446,30 +380,26 @@ def test_incr_restore_with_tablespace_5(self): with some old content, that belongs to an instance with different system id. 
""" - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) + node1 = self.pg_node.make_simple('node1', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node1) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node1) node1.slow_start() self.create_tblspace_in_node(node1, 'tblspace') node1.pgbench_init(scale=10, tablespace='tblspace') # take backup of node1 with tblspace - self.backup_node(backup_dir, 'node', node1, options=['--stream']) + self.pb.backup_node('node', node1, options=['--stream']) pgdata = self.pgdata_content(node1.data_dir) node1.stop() # recreate node - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2'), - set_replication=True, - initdb_params=['--data-checksums']) + node2 = self.pg_node.make_simple('node2', + set_replication=True) node2.slow_start() self.create_tblspace_in_node(node2, 'tblspace') @@ -479,8 +409,7 @@ def test_incr_restore_with_tablespace_5(self): tblspc1_path = self.get_tblspace_path(node1, 'tblspace') tblspc2_path = self.get_tblspace_path(node2, 'tblspace') - out = self.restore_node( - backup_dir, 'node', node1, + out = self.pb.restore_node('node', node1, options=[ "-j", "4", "--force", "--incremental-mode=checksum", @@ -499,47 +428,30 @@ def test_incr_restore_with_tablespace_6(self): """ Empty pgdata, not empty tablespace """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'tblspace') node.pgbench_init(scale=10, tablespace='tblspace') # take backup of node with tblspace - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - node.cleanup() + node.stop() + shutil.rmtree(node.data_dir) - try: - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", - "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because there is running postmaster " - "process in destination directory.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: PGDATA is empty, but tablespace destination is not', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + options=["-j", "4", "--incremental-mode=checksum"], + expect_error="because there is running postmaster") + self.assertMessage(contains='ERROR: PGDATA is empty, but tablespace destination is not') - out = self.restore_node( - backup_dir, 'node', node, + out = self.pb.restore_node('node', node, options=[ "-j", "4", "--force", "--incremental-mode=checksum"]) @@ -557,46 +469,23 @@ def test_incr_restore_with_tablespace_7(self): Restore backup without tablespace into PGDATA with tablespace. 
""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # take backup of node with tblspace - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) self.create_tblspace_in_node(node, 'tblspace') node.pgbench_init(scale=5, tablespace='tblspace') node.stop() -# try: -# self.restore_node( -# backup_dir, 'node', node, -# options=[ -# "-j", "4", -# "--incremental-mode=checksum"]) -# # we should die here because exception is what we expect to happen -# self.assertEqual( -# 1, 0, -# "Expecting Error because there is running postmaster " -# "process in destination directory.\n " -# "Output: {0} \n CMD: {1}".format( -# repr(self.output), self.cmd)) -# except ProbackupException as e: -# self.assertIn( -# 'ERROR: PGDATA is empty, but tablespace destination is not', -# e.message, -# '\n Unexpected Error Message: {0}\n CMD: {1}'.format( -# repr(e.message), self.cmd)) - - out = self.restore_node( - backup_dir, 'node', node, + out = self.pb.restore_node('node', node, options=[ "-j", "4", "--incremental-mode=checksum"]) @@ -606,65 +495,29 @@ def test_incr_restore_with_tablespace_7(self): # @unittest.skip("skip") def test_basic_incr_restore_sanity(self): """recovery to target timeline""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) - try: - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4", "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because there is running postmaster " - "process in destination directory.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Postmaster with pid', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Incremental restore is not allowed', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + options=["-j", "4", "--incremental-mode=checksum"], + expect_error="because there is running postmaster") + self.assertMessage(contains='WARNING: Postmaster with pid') + self.assertMessage(contains='ERROR: Incremental restore is not allowed') - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1 = self.pg_node.make_simple('node_1') - try: - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir, - 
options=["-j", "4", "--incremental-mode=checksum"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because destination directory has wrong system id.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup catalog was initialized for system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Incremental restore is not allowed', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_1, + options=["-j", "4", "--incremental-mode=checksum"], + expect_error="because destination directory has wrong system id") + self.assertMessage(contains='WARNING: Backup catalog was initialized for system id') + self.assertMessage(contains='ERROR: Incremental restore is not allowed') # @unittest.skip("skip") def test_incr_checksum_restore(self): @@ -674,24 +527,22 @@ def test_incr_checksum_restore(self): X - is instance, we want to return it to C state. """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=50) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() @@ -705,17 +556,15 @@ def test_incr_checksum_restore(self): pgbench.wait() node.stop(['-m', 'immediate', '-D', node.data_dir]) - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1 = self.pg_node.make_simple('node_1') node_1.cleanup() - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir, + self.pb.restore_node('node', node_1, options=[ '--recovery-target-action=promote', '--recovery-target-xid={0}'.format(xid)]) - self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.set_auto_conf({'port': node_1.port}) node_1.slow_start() # /-- @@ -725,7 +574,7 @@ def test_incr_checksum_restore(self): # /--C # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, + self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='page') # /--C------ @@ -733,23 +582,30 @@ def test_incr_checksum_restore(self): pgbench = node_1.pgbench(options=['-T', '50', '-c', '1']) pgbench.wait() + checksums = node_1.pgbench_table_checksums() + # /--C------D # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, + self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='page') + node_1.stop() + pgdata = self.pgdata_content(node_1.data_dir) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = 
self.pgdata_content(node.data_dir) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() - self.compare_pgdata(pgdata, pgdata_restored) + checksums_restored = node.pgbench_table_checksums() + + if checksums != checksums_restored: + self.compare_pgdata(pgdata, pgdata_restored) + self.assertEqual(checksums, checksums_restored) # @unittest.skip("skip") @@ -760,24 +616,22 @@ def test_incr_lsn_restore(self): X - is instance, we want to return it to C state. """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=50) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() @@ -791,17 +645,15 @@ def test_incr_lsn_restore(self): pgbench.wait() node.stop(['-m', 'immediate', '-D', node.data_dir]) - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1 = self.pg_node.make_simple('node_1') node_1.cleanup() - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir, + self.pb.restore_node('node', node_1, options=[ '--recovery-target-action=promote', '--recovery-target-xid={0}'.format(xid)]) - self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.set_auto_conf({'port': node_1.port}) node_1.slow_start() # /-- @@ -811,7 +663,7 @@ def test_incr_lsn_restore(self): # /--C # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, + self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='page') # /--C------ @@ -819,22 +671,29 @@ def test_incr_lsn_restore(self): pgbench = node_1.pgbench(options=['-T', '50', '-c', '1']) pgbench.wait() + checksums = node_1.pgbench_table_checksums() + # /--C------D # --A-----B----*----X - self.backup_node(backup_dir, 'node', node_1, + self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='page') + node_1.stop() + pgdata = self.pgdata_content(node_1.data_dir) - self.restore_node( - backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn"]) + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=lsn"]) pgdata_restored = self.pgdata_content(node.data_dir) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() - self.compare_pgdata(pgdata, pgdata_restored) + checksums_restored = node.pgbench_table_checksums() + + if checksums != checksums_restored: + self.compare_pgdata(pgdata, pgdata_restored) + self.assertEqual(checksums, checksums_restored) # @unittest.skip("skip") def test_incr_lsn_sanity(self): @@ -845,61 +704,45 @@ def test_incr_lsn_sanity(self): X - is instance, we want to return it to state B. fail is expected behaviour in case of lsn restore. 
""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=10) - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1 = self.pg_node.make_simple('node_1') node_1.cleanup() - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir) + self.pb.restore_node('node', node=node_1) - self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.set_auto_conf({'port': node_1.port}) node_1.slow_start() pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - self.backup_node(backup_dir, 'node', node_1, + self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='full') pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - page_id = self.backup_node(backup_dir, 'node', node_1, + page_id = self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='page') node.stop() - try: - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, - options=["-j", "4", "--incremental-mode=lsn"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental restore in lsn mode is impossible\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Cannot perform incremental restore of " - "backup chain {0} in 'lsn' mode".format(page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + options=["-j", "4", "--incremental-mode=lsn"], + expect_error="because incremental restore in lsn mode is impossible") + self.assertMessage(contains="ERROR: Cannot perform incremental restore of " + "backup chain {0} in 'lsn' mode".format(page_id)) # @unittest.skip("skip") def test_incr_checksum_sanity(self): @@ -909,46 +752,41 @@ def test_incr_checksum_sanity(self): X - is instance, we want to return it to state B. 
""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=20) - node_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_1')) + node_1 = self.pg_node.make_simple('node_1') node_1.cleanup() - self.restore_node( - backup_dir, 'node', node_1, data_dir=node_1.data_dir) + self.pb.restore_node('node', node=node_1) - self.set_auto_conf(node_1, {'port': node_1.port}) + node_1.set_auto_conf({'port': node_1.port}) node_1.slow_start() pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - self.backup_node(backup_dir, 'node', node_1, + self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='full') pgbench = node_1.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - page_id = self.backup_node(backup_dir, 'node', node_1, + page_id = self.pb.backup_node('node', node_1, data_dir=node_1.data_dir, backup_type='page') pgdata = self.pgdata_content(node_1.data_dir) node.stop() - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -960,24 +798,20 @@ def test_incr_checksum_corruption_detection(self): """ check that corrupted page got detected and replaced """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), -# initdb_params=['--data-checksums'], - pg_options={'wal_log_hints': 'on'}) + node = self.pg_node.make_simple('node', checksum=False, pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=20) pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, + self.pb.backup_node('node', node, data_dir=node.data_dir, backup_type='full') heap_path = node.safe_psql( @@ -987,7 +821,7 @@ def test_incr_checksum_corruption_detection(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - page_id = self.backup_node(backup_dir, 'node', node, + page_id = self.pb.backup_node('node', node, data_dir=node.data_dir, backup_type='page') pgdata = self.pgdata_content(node.data_dir) @@ -1001,8 +835,7 @@ def test_incr_checksum_corruption_detection(self): f.flush() f.close - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=checksum"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1014,24 +847,22 @@ def test_incr_lsn_corruption_detection(self): """ check that corrupted page got detected and replaced """ - node 
= self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=20) pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, + self.pb.backup_node('node', node, data_dir=node.data_dir, backup_type='full') heap_path = node.safe_psql( @@ -1041,7 +872,7 @@ def test_incr_lsn_corruption_detection(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - page_id = self.backup_node(backup_dir, 'node', node, + page_id = self.pb.backup_node('node', node, data_dir=node.data_dir, backup_type='page') pgdata = self.pgdata_content(node.data_dir) @@ -1055,8 +886,7 @@ def test_incr_lsn_corruption_detection(self): f.flush() f.close - self.restore_node( - backup_dir, 'node', node, data_dir=node.data_dir, + self.pb.restore_node('node', node, options=["-j", "4", "--incremental-mode=lsn"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1067,15 +897,13 @@ def test_incr_lsn_corruption_detection(self): # @unittest.expectedFailure def test_incr_restore_multiple_external(self): """check that cmdline has priority over config""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() external_dir1 = self.get_tblspace_path(node, 'external_dir1') @@ -1083,35 +911,27 @@ def test_incr_restore_multiple_external(self): # FULL backup node.pgbench_init(scale=20) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['-E{0}{1}{2}'.format( external_dir1, self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='full', options=["-j", "4"]) pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', options=["-j", "4"]) pgdata = 
self.pgdata_content( @@ -1122,8 +942,7 @@ def test_incr_restore_multiple_external(self): node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node=node, options=["-j", "4", '--incremental-mode=checksum']) pgdata_restored = self.pgdata_content( @@ -1134,15 +953,13 @@ def test_incr_restore_multiple_external(self): # @unittest.expectedFailure def test_incr_lsn_restore_multiple_external(self): """check that cmdline has priority over config""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() external_dir1 = self.get_tblspace_path(node, 'external_dir1') @@ -1150,35 +967,27 @@ def test_incr_lsn_restore_multiple_external(self): # FULL backup node.pgbench_init(scale=20) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4"]) # fill external directories with data - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir1, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir1, options=["-j", "4"]) - self.restore_node( - backup_dir, 'node', node, - data_dir=external_dir2, options=["-j", "4"]) + self.pb.restore_node('node', restore_dir=external_dir2, options=["-j", "4"]) - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['-E{0}{1}{2}'.format( external_dir1, self.EXTERNAL_DIRECTORY_DELIMITER, external_dir2)]) # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='full', options=["-j", "4"]) pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() # cmdline option MUST override options in config - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', options=["-j", "4"]) pgdata = self.pgdata_content( @@ -1189,8 +998,7 @@ def test_incr_lsn_restore_multiple_external(self): node.stop() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4", '--incremental-mode=lsn']) pgdata_restored = self.pgdata_content( @@ -1202,22 +1010,20 @@ def test_incr_lsn_restore_multiple_external(self): def test_incr_lsn_restore_backward(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on', 'hot_standby': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup node.pgbench_init(scale=2) - full_id = self.backup_node( - backup_dir, 'node', node, + full_checksums = node.pgbench_table_checksums() + full_id = self.pb.backup_node('node', node, backup_type="full", 
options=["-j", "4"]) full_pgdata = self.pgdata_content(node.data_dir) @@ -1225,8 +1031,8 @@ def test_incr_lsn_restore_backward(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - page_id = self.backup_node( - backup_dir, 'node', node, + page_checksums = node.pgbench_table_checksums() + page_id = self.pb.backup_node('node', node, backup_type='page', options=["-j", "4"]) page_pgdata = self.pgdata_content(node.data_dir) @@ -1234,8 +1040,8 @@ def test_incr_lsn_restore_backward(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - delta_id = self.backup_node( - backup_dir, 'node', node, + delta_checksums = node.pgbench_table_checksums() + delta_id = self.pb.backup_node('node', node, backup_type='delta', options=["-j", "4"]) delta_pgdata = self.pgdata_content(node.data_dir) @@ -1245,8 +1051,7 @@ def test_incr_lsn_restore_backward(self): node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, + self.pb.restore_node('node', node, backup_id=full_id, options=[ "-j", "4", '--incremental-mode=lsn', @@ -1254,44 +1059,38 @@ def test_incr_lsn_restore_backward(self): '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(full_pgdata, pgdata_restored) node.slow_start(replica=True) + checksums_restored = node.pgbench_table_checksums() node.stop() - try: - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, - options=[ - "-j", "4", '--incremental-mode=lsn', - '--recovery-target=immediate', '--recovery-target-action=pause']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental restore in lsn mode is impossible\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "Cannot perform incremental restore of backup chain", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + if full_checksums != checksums_restored: + self.compare_pgdata(full_pgdata, pgdata_restored) + self.assertEqual(full_checksums, checksums_restored) + + self.pb.restore_node('node', node=node, backup_id=page_id, + options=["-j", "4", '--incremental-mode=lsn', + '--recovery-target=immediate', + '--recovery-target-action=pause'], + expect_error="because incremental restore in lsn mode is impossible") + self.assertMessage(contains="Cannot perform incremental restore of backup chain") - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, + self.pb.restore_node('node', node, backup_id=page_id, options=[ "-j", "4", '--incremental-mode=checksum', '--recovery-target=immediate', '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(page_pgdata, pgdata_restored) node.slow_start(replica=True) + checksums_restored = node.pgbench_table_checksums() node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=delta_id, + if page_checksums != checksums_restored: + self.compare_pgdata(page_pgdata, pgdata_restored) + self.assertEqual(page_checksums, checksums_restored) + + self.pb.restore_node('node', node, backup_id=delta_id, options=[ "-j", "4", '--incremental-mode=lsn', @@ -1299,30 +1098,35 @@ def test_incr_lsn_restore_backward(self): '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(delta_pgdata, pgdata_restored) + + node.slow_start(replica=True) + checksums_restored = 
node.pgbench_table_checksums() + node.stop() + + if delta_checksums != checksums_restored: + self.compare_pgdata(delta_pgdata, pgdata_restored) + self.assertEqual(delta_checksums, checksums_restored) # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_checksum_restore_backward(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'hot_standby': 'on'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup node.pgbench_init(scale=20) - full_id = self.backup_node( - backup_dir, 'node', node, + full_checksums = node.pgbench_table_checksums() + full_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4"]) full_pgdata = self.pgdata_content(node.data_dir) @@ -1330,8 +1134,8 @@ def test_incr_checksum_restore_backward(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - page_id = self.backup_node( - backup_dir, 'node', node, + page_checksums = node.pgbench_table_checksums() + page_id = self.pb.backup_node('node', node, backup_type='page', options=["-j", "4"]) page_pgdata = self.pgdata_content(node.data_dir) @@ -1339,8 +1143,8 @@ def test_incr_checksum_restore_backward(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - delta_id = self.backup_node( - backup_dir, 'node', node, + delta_checksums = node.pgbench_table_checksums() + delta_id = self.pb.backup_node('node', node, backup_type='delta', options=["-j", "4"]) delta_pgdata = self.pgdata_content(node.data_dir) @@ -1350,8 +1154,7 @@ def test_incr_checksum_restore_backward(self): node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, + self.pb.restore_node('node', node, backup_id=full_id, options=[ "-j", "4", '--incremental-mode=checksum', @@ -1359,13 +1162,16 @@ def test_incr_checksum_restore_backward(self): '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(full_pgdata, pgdata_restored) node.slow_start(replica=True) + checksums_restored = node.pgbench_table_checksums() node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, + if full_checksums != checksums_restored: + self.compare_pgdata(full_pgdata, pgdata_restored) + self.assertEqual(full_checksums, checksums_restored) + + self.pb.restore_node('node', node, backup_id=page_id, options=[ "-j", "4", '--incremental-mode=checksum', @@ -1373,13 +1179,16 @@ def test_incr_checksum_restore_backward(self): '--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(page_pgdata, pgdata_restored) node.slow_start(replica=True) + checksums_restored = node.pgbench_table_checksums() node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=delta_id, + if page_checksums != checksums_restored: + self.compare_pgdata(page_pgdata, pgdata_restored) + self.assertEqual(page_checksums, checksums_restored) + + self.pb.restore_node('node', node, backup_id=delta_id, options=[ "-j", "4", '--incremental-mode=checksum', @@ -1387,37 +1196,36 @@ def test_incr_checksum_restore_backward(self): 
'--recovery-target-action=pause']) pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(delta_pgdata, pgdata_restored) + + node.slow_start(replica=True) + checksums_restored = node.pgbench_table_checksums() + node.stop() + + if delta_checksums != checksums_restored: + self.compare_pgdata(delta_pgdata, pgdata_restored) + self.assertEqual(delta_checksums, checksums_restored) # @unittest.skip("skip") def test_make_replica_via_incr_checksum_restore(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master, replica=True) + self.pb.init() + self.pb.add_instance('node', master) + self.pb.set_archiving('node', master, replica=True) master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() master.pgbench_init(scale=20) - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) - self.restore_node( - backup_dir, 'node', replica, options=['-R']) + self.pb.restore_node('node', replica, options=['-R']) # Settings for Replica self.set_replica(master, replica, synchronous=False) @@ -1441,13 +1249,11 @@ def test_make_replica_via_incr_checksum_restore(self): pgbench.wait() # take backup from new master - self.backup_node( - backup_dir, 'node', new_master, + self.pb.backup_node('node', new_master, data_dir=new_master.data_dir, backup_type='page') # restore old master as replica - self.restore_node( - backup_dir, 'node', old_master, data_dir=old_master.data_dir, + self.pb.restore_node('node', old_master, options=['-R', '--incremental-mode=checksum']) self.set_replica(new_master, old_master, synchronous=True) @@ -1461,31 +1267,23 @@ def test_make_replica_via_incr_checksum_restore(self): def test_make_replica_via_incr_lsn_restore(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master, replica=True) + self.pb.init() + self.pb.add_instance('node', master) + self.pb.set_archiving('node', master, replica=True) master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() master.pgbench_init(scale=20) - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) - self.restore_node( - backup_dir, 'node', replica, options=['-R']) + self.pb.restore_node('node', replica, 
options=['-R']) # Settings for Replica self.set_replica(master, replica, synchronous=False) @@ -1509,13 +1307,11 @@ def test_make_replica_via_incr_lsn_restore(self): pgbench.wait() # take backup from new master - self.backup_node( - backup_dir, 'node', new_master, + self.pb.backup_node('node', new_master, data_dir=new_master.data_dir, backup_type='page') # restore old master as replica - self.restore_node( - backup_dir, 'node', old_master, data_dir=old_master.data_dir, + self.pb.restore_node('node', old_master, options=['-R', '--incremental-mode=lsn']) self.set_replica(new_master, old_master, synchronous=True) @@ -1530,14 +1326,13 @@ def test_make_replica_via_incr_lsn_restore(self): def test_incr_checksum_long_xact(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1558,12 +1353,10 @@ def test_incr_checksum_long_xact(self): con.execute("INSERT INTO t1 values (2)") con2.execute("INSERT INTO t1 values (3)") - full_id = self.backup_node( - backup_dir, 'node', node, + full_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) con.commit() @@ -1579,8 +1372,7 @@ def test_incr_checksum_long_xact(self): node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, + self.pb.restore_node('node', node, backup_id=full_id, options=["-j", "4", '--incremental-mode=checksum']) node.slow_start() @@ -1593,20 +1385,21 @@ def test_incr_checksum_long_xact(self): # @unittest.skip("skip") # @unittest.expectedFailure - # This test will pass with Enterprise - # because it has checksums enabled by default - @unittest.skipIf(ProbackupTest.enterprise, 'skip') def test_incr_lsn_long_xact_1(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + if self.pgpro and not self.shardman: + initdb_params = ['--no-data-checksums'] + else: + initdb_params = [] + node = self.pg_node.make_simple('node', + set_replication=True, + initdb_params=initdb_params, + checksum=False) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1627,12 +1420,10 @@ def test_incr_lsn_long_xact_1(self): con.execute("INSERT INTO t1 values (2)") con2.execute("INSERT INTO t1 values (3)") - full_id = self.backup_node( - backup_dir, 'node', node, + full_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) con.commit() @@ -1649,41 +1440,28 @@ def test_incr_lsn_long_xact_1(self): node.stop() - try: - self.restore_node( - backup_dir, 'node', node, 
backup_id=full_id, - options=["-j", "4", '--incremental-mode=lsn']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because incremental restore in lsn mode is impossible\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Incremental restore in 'lsn' mode require data_checksums to be " - "enabled in destination data directory", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, backup_id=full_id, + options=["-j", "4", '--incremental-mode=lsn'], + expect_error="because incremental restore in lsn mode is impossible") + self.assertMessage(contains="ERROR: Incremental restore in 'lsn' mode " + "require data_checksums to be " + "enabled in destination data directory") # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_lsn_long_xact_2(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'full_page_writes': 'off', 'wal_log_hints': 'off'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1704,12 +1482,10 @@ def test_incr_lsn_long_xact_2(self): con.execute("INSERT INTO t1 values (2)") con2.execute("INSERT INTO t1 values (3)") - full_id = self.backup_node( - backup_dir, 'node', node, + full_id = self.pb.backup_node('node', node, backup_type="full", options=["-j", "4", "--stream"]) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) # print(node.safe_psql( @@ -1738,8 +1514,7 @@ def test_incr_lsn_long_xact_2(self): node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, + self.pb.restore_node('node', node, backup_id=full_id, options=["-j", "4", '--incremental-mode=lsn']) node.slow_start() @@ -1755,14 +1530,12 @@ def test_incr_lsn_long_xact_2(self): def test_incr_restore_zero_size_file_checksum(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() fullpath = os.path.join(node.data_dir, 'simple_file') @@ -1771,8 +1544,7 @@ def test_incr_restore_zero_size_file_checksum(self): f.close # FULL backup - id1 = self.backup_node( - backup_dir, 'node', node, + id1 = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) pgdata1 = self.pgdata_content(node.data_dir) @@ -1783,37 +1555,32 @@ def test_incr_restore_zero_size_file_checksum(self): f.flush() f.close - id2 = self.backup_node( - backup_dir, 'node', node, + id2 = self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) pgdata2 = 
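# Incremental restore in 'lsn' mode is rejected above when the destination cluster
# was initialized without data checksums (the tests toggle this via the
# --no-data-checksums initdb parameter).  A standalone sketch, assuming
# pg_controldata is on PATH, of how that precondition can be read from the
# control data of a stopped cluster:
import subprocess

def data_checksums_enabled(pgdata):
    """Return True if the cluster in *pgdata* was initialized with data checksums."""
    out = subprocess.run(['pg_controldata', pgdata],
                         check=True, capture_output=True, text=True).stdout
    for line in out.splitlines():
        if line.startswith('Data page checksum version'):
            return line.split(':', 1)[1].strip() != '0'
    raise RuntimeError('pg_controldata output has no "Data page checksum version" line')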
self.pgdata_content(node.data_dir) with open(fullpath, "w") as f: f.close() - id3 = self.backup_node( - backup_dir, 'node', node, + id3 = self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) pgdata3 = self.pgdata_content(node.data_dir) node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=id1, + self.pb.restore_node('node', node, backup_id=id1, options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata1, pgdata_restored) - self.restore_node( - backup_dir, 'node', node, backup_id=id2, + self.pb.restore_node('node', node, backup_id=id2, options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata2, pgdata_restored) - self.restore_node( - backup_dir, 'node', node, backup_id=id3, + self.pb.restore_node('node', node, backup_id=id3, options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1824,14 +1591,12 @@ def test_incr_restore_zero_size_file_checksum(self): def test_incr_restore_zero_size_file_lsn(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() fullpath = os.path.join(node.data_dir, 'simple_file') @@ -1840,8 +1605,7 @@ def test_incr_restore_zero_size_file_lsn(self): f.close # FULL backup - id1 = self.backup_node( - backup_dir, 'node', node, + id1 = self.pb.backup_node('node', node, options=["-j", "4", "--stream"]) pgdata1 = self.pgdata_content(node.data_dir) @@ -1852,23 +1616,20 @@ def test_incr_restore_zero_size_file_lsn(self): f.flush() f.close - id2 = self.backup_node( - backup_dir, 'node', node, + id2 = self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) pgdata2 = self.pgdata_content(node.data_dir) with open(fullpath, "w") as f: f.close() - id3 = self.backup_node( - backup_dir, 'node', node, + id3 = self.pb.backup_node('node', node, backup_type="delta", options=["-j", "4", "--stream"]) pgdata3 = self.pgdata_content(node.data_dir) node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=id1, + self.pb.restore_node('node', node, backup_id=id1, options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1877,8 +1638,7 @@ def test_incr_restore_zero_size_file_lsn(self): node.slow_start() node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=id2, + self.pb.restore_node('node', node, backup_id=id2, options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1887,8 +1647,7 @@ def test_incr_restore_zero_size_file_lsn(self): node.slow_start() node.stop() - self.restore_node( - backup_dir, 'node', node, backup_id=id3, + self.pb.restore_node('node', node, backup_id=id3, options=["-j", "4", '-I', 'checksum']) pgdata_restored = self.pgdata_content(node.data_dir) @@ -1896,14 +1655,12 @@ def test_incr_restore_zero_size_file_lsn(self): def test_incremental_partial_restore_exclude_checksum(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 10, 1): @@ -1926,30 +1683,27 @@ def test_incremental_partial_restore_exclude_checksum(self): node.pgbench_init(scale=20) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgdata = self.pgdata_content(node.data_dir) pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() # PAGE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') # restore FULL backup into second node2 - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1')) + node1 = self.pg_node.make_simple('node1') node1.cleanup() - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() # restore some data into node2 - self.restore_node(backup_dir, 'node', node2) + self.pb.restore_node('node', node2) # partial restore backup into node1 - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node1, options=[ "--db-exclude=db1", "--db-exclude=db5"]) @@ -1957,8 +1711,7 @@ def test_incremental_partial_restore_exclude_checksum(self): pgdata1 = self.pgdata_content(node1.data_dir) # partial incremental restore backup into node2 - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node2, options=[ "--db-exclude=db1", "--db-exclude=db5", @@ -1970,7 +1723,7 @@ def test_incremental_partial_restore_exclude_checksum(self): self.compare_pgdata(pgdata1, pgdata2) - self.set_auto_conf(node2, {'port': node2.port}) + node2.set_auto_conf({'port': node2.port}) node2.slow_start() @@ -1999,14 +1752,12 @@ def test_incremental_partial_restore_exclude_checksum(self): def test_incremental_partial_restore_exclude_lsn(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 10, 1): @@ -2029,32 +1780,29 @@ def test_incremental_partial_restore_exclude_lsn(self): node.pgbench_init(scale=20) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgdata = self.pgdata_content(node.data_dir) pgbench = node.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() # PAGE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') node.stop() # restore FULL backup into second node2 - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1')) + node1 = self.pg_node.make_simple('node1') node1.cleanup() - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() # restore some data into node2 - self.restore_node(backup_dir, 'node', node2) + self.pb.restore_node('node', node2) # partial restore backup into node1 - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node1, options=[ "--db-exclude=db1", "--db-exclude=db5"]) @@ -2065,8 +1813,7 @@ def test_incremental_partial_restore_exclude_lsn(self): node2.port = node.port node2.slow_start() node2.stop() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node2, options=[ "--db-exclude=db1", "--db-exclude=db5", @@ -2078,7 +1825,7 @@ def test_incremental_partial_restore_exclude_lsn(self): self.compare_pgdata(pgdata1, pgdata2) - self.set_auto_conf(node2, {'port': node2.port}) + node2.set_auto_conf({'port': node2.port}) node2.slow_start() @@ -2107,14 +1854,12 @@ def test_incremental_partial_restore_exclude_lsn(self): def test_incremental_partial_restore_exclude_tablespace_checksum(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # cat_version = node.get_control_data()["Catalog version number"] @@ -2151,30 +1896,26 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): db_list[line['datname']] = line['oid'] # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # node1 - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1')) + node1 = self.pg_node.make_simple('node1') node1.cleanup() node1_tablespace = self.get_tblspace_path(node1, 'somedata') # node2 - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() node2_tablespace = self.get_tblspace_path(node2, 'somedata') # in node2 restore full backup - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node2, options=[ "-T", "{0}={1}".format( node_tablespace, node2_tablespace)]) # partial restore into node1 - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node1, options=[ "--db-exclude=db1", "--db-exclude=db5", @@ -2184,31 +1925,17 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): pgdata1 = self.pgdata_content(node1.data_dir) # partial incremental restore into node2 - try: - self.restore_node( - backup_dir, 'node', - node2, options=[ - "-I", "checksum", - "--db-exclude=db1", - "--db-exclude=db5", - "-T", "{0}={1}".format( - node_tablespace, node2_tablespace), - "--destroy-all-other-dbs"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because remapped tablespace contain old data .\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Remapped tablespace destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.restore_node( - backup_dir, 'node', 
+ self.pb.restore_node('node', node2, + options=["-I", "checksum", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", "{0}={1}".format( + node_tablespace, node2_tablespace), + "--destroy-all-other-dbs",], + expect_error="because remapped tablespace contain old data") + self.assertMessage(contains='ERROR: Remapped tablespace destination is not empty:') + + self.pb.restore_node('node', node2, options=[ "-I", "checksum", "--force", "--db-exclude=db1", @@ -2222,7 +1949,7 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): self.compare_pgdata(pgdata1, pgdata2) - self.set_auto_conf(node2, {'port': node2.port}) + node2.set_auto_conf({'port': node2.port}) node2.slow_start() node2.safe_psql( @@ -2252,40 +1979,34 @@ def test_incremental_partial_restore_deny(self): """ Do now allow partial incremental restore into non-empty PGDATA becase we can't limit WAL replay to a single database. + PBCKP-604 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 3): node.safe_psql('postgres', f'CREATE database db{i}') # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgdata = self.pgdata_content(node.data_dir) - try: - self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN']) - self.fail("incremental partial restore is not allowed") - except ProbackupException as e: - self.assertIn("Incremental restore is not allowed: Postmaster is running.", e.message) + self.pb.restore_node('node', node, options=["--db-include=db1", '-I', 'LSN'], + expect_error="because postmaster is running") + self.assertMessage(contains="Incremental restore is not allowed: Postmaster is running.") node.safe_psql('db2', 'create table x (id int)') node.safe_psql('db2', 'insert into x values (42)') node.stop() - try: - self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN']) - self.fail("because incremental partial restore is not allowed") - except ProbackupException as e: - self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) + self.pb.restore_node('node', node, options=["--db-include=db1", '-I', 'LSN'], + expect_error="because incremental partial restore is not allowed") + self.assertMessage(contains="Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden") node.slow_start() value = node.execute('db2', 'select * from x')[0][0] @@ -2296,15 +2017,13 @@ def test_deny_incremental_partial_restore_exclude_tablespace_checksum(self): Do now allow partial incremental restore into non-empty PGDATA becase we can't limit WAL replay to a single database. 
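# The partial incremental restores above are driven through the framework's
# self.pb.restore_node() wrapper.  Roughly the command line being exercised,
# assembled here as a hedged standalone sketch; the binary path, catalog path and
# the db/tablespace names are placeholders taken from the options used above:
import subprocess

def partial_incremental_restore(pg_probackup_bin, backup_dir, instance, pgdata,
                                tablespace_map=None, force=False):
    cmd = [pg_probackup_bin, 'restore',
           '-B', backup_dir,
           '--instance={0}'.format(instance),
           '-D', pgdata,
           '-I', 'checksum',                          # incremental mode
           '--db-exclude=db1', '--db-exclude=db5',
           '--destroy-all-other-dbs']
    if tablespace_map:
        old_dir, new_dir = tablespace_map
        cmd += ['-T', '{0}={1}'.format(old_dir, new_dir)]   # remap tablespace
    if force:
        cmd.append('--force')    # the tests add this for the second, successful attempt
    return subprocess.run(cmd, check=True, capture_output=True, text=True)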
(case of tablespaces) + PBCKP-604 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -2335,67 +2054,58 @@ def test_deny_incremental_partial_restore_exclude_tablespace_checksum(self): db_list[line['datname']] = line['oid'] # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # node2 - node2 = self.make_simple_node('node2') + node2 = self.pg_node.make_simple('node2') node2.cleanup() node2_tablespace = self.get_tblspace_path(node2, 'somedata') # in node2 restore full backup - self.restore_node( - backup_dir, 'node', + self.pb.restore_node( + 'node', node2, options=[ "-T", f"{node_tablespace}={node2_tablespace}"]) # partial incremental restore into node2 - try: - self.restore_node(backup_dir, 'node', node2, - options=["-I", "checksum", - "--db-exclude=db1", - "--db-exclude=db5", - "-T", f"{node_tablespace}={node2_tablespace}"]) - self.fail("remapped tablespace contain old data") - except ProbackupException as e: - pass - - try: - self.restore_node(backup_dir, 'node', node2, - options=[ - "-I", "checksum", "--force", - "--db-exclude=db1", "--db-exclude=db5", - "-T", f"{node_tablespace}={node2_tablespace}"]) - self.fail("incremental partial restore is not allowed") - except ProbackupException as e: - self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) + self.pb.restore_node('node', node2, + options=["-I", "checksum", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"], + expect_error="because remapped tablespace contain old data") + + self.pb.restore_node('node', node2, + options=[ + "-I", "checksum", "--force", + "--db-exclude=db1", "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"], + expect_error="because incremental partial restore is not allowed") + self.assertMessage(contains="Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden") def test_incremental_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - initdb_params=['--data-checksums']) + node1 = self.pg_node.make_simple('node1') node1.cleanup() node.pgbench_init(scale=5) # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # in node1 restore full backup - self.restore_node(backup_dir, 'node', node1) - 
self.set_auto_conf(node1, {'port': node1.port}) + self.pb.restore_node('node', node1) + node1.set_auto_conf({'port': node1.port}) node1.slow_start() pgbench = node.pgbench( @@ -2411,14 +2121,14 @@ def test_incremental_pg_filenode_map(self): 'reindex index pg_type_oid_index') # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node1.stop() # incremental restore into node1 - self.restore_node(backup_dir, 'node', node1, options=["-I", "checksum"]) + self.pb.restore_node('node', node1, options=["-I", "checksum"]) - self.set_auto_conf(node1, {'port': node1.port}) + node1.set_auto_conf({'port': node1.port}) node1.slow_start() node1.safe_psql( @@ -2426,3 +2136,194 @@ def test_incremental_pg_filenode_map(self): 'select 1') # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn + + # @unittest.skip("skip") + @needs_gdb + def test_incr_restore_issue_313(self): + """ + Check that failed incremental restore can be restarted + """ + # fname = self.id().split('.')[3] + + node = self.pg_node.make_simple('node', + set_replication=True) + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.pgbench_init(scale = 50) + + full_backup_id = self.pb.backup_node('node', node, backup_type='full') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + last_backup_id = self.pb.backup_node('node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.pb.restore_node('node', node, backup_id=full_backup_id) + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', last_backup_id) + for file in filelist: + # count only nondata files + if int(filelist[file]['is_datafile']) == 0 and \ + filelist[file]['kind'] != 'dir' and \ + file != 'database_map': + count += 1 + + gdb = self.pb.restore_node('node', node, gdb=True, + backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) + gdb.verbose = False + gdb.set_breakpoint('restore_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(count - 1) + gdb.quit() + + bak_file = os.path.join(node.data_dir, 'global', 'pg_control.pbk.bak') + self.assertTrue( + os.path.exists(bak_file), + "pg_control bak File should not exist: {0}".format(bak_file)) + + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup is not fully restored") + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + self.assertIn( + "postgres: could not find the database system", + f.read()) + self.pb.restore_node('node', node, + backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) + node.slow_start() + + self.compare_pgdata(pgdata, self.pgdata_content(node.data_dir)) + + # @unittest.skip("skip") + def test_skip_pages_at_non_zero_segment_checksum(self): + if self.remote: + self.skipTest("Skipped because this test doesn't work properly in remote mode yet") + + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + 
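# test_incr_restore_issue_313 above hits the restore_non_data_file breakpoint
# exactly (count - 1) times before terminating the restore mid-way, so the count
# of non-data files matters.  A standalone restatement of that counting loop over
# an already parsed file list of the same shape
# ({name: {'is_datafile': ..., 'kind': ...}}); parsing the backup metadata itself
# stays with the framework's get_backup_filelist() helper, which is not part of
# this diff.
def count_non_data_files(filelist):
    count = 0
    for name, meta in filelist.items():
        if int(meta['is_datafile']) == 0 and \
                meta['kind'] != 'dir' and \
                name != 'database_map':
            count += 1
    return count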
self.pb.set_archiving('node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.pb.backup_node('node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.pb.backup_node('node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.pb.restore_node('node', node, + options=["-j", "4", "--incremental-mode=checksum", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_skip_pages_at_non_zero_segment_lsn(self): + if self.remote: + self.skipTest("Skipped because this test doesn't work properly in remote mode yet") + + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.pb.backup_node('node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.pb.backup_node('node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.pb.restore_node('node', node, + options=["-j", "4", "--incremental-mode=lsn", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/init_test.py b/tests/init_test.py index 4e000c78f..92e684335 100644 --- a/tests/init_test.py +++ b/tests/init_test.py @@ -1,139 +1,218 @@ import os -import unittest -from .helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException import shutil +import stat +import unittest + +from .helpers.ptrack_helpers import dir_files, ProbackupTest, fs_backup_class + +DIR_PERMISSION = 0o700 if os.name != 'nt' else 0o777 +CATALOG_DIRS = ['backups', 'wal'] + +class InitTest(ProbackupTest): -class InitTest(ProbackupTest, unittest.TestCase): + def tearDown(self): + super().tearDown() + # Remove some additional backup dirs + if hasattr(self, 
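# Both segment-skipping tests above depend on the 20.6M-row table having grown
# past one segment, so that '<relfilenode>.1' exists next to the main fork file.
# A standalone sketch of enumerating those segment files from the
# pg_relation_filepath() result, assuming PostgreSQL's default 1 GB segment size:
import os

def relation_segment_paths(data_dir, relfilepath, max_segments=16):
    """Yield existing segment files for a relation: <path>, <path>.1, <path>.2 ..."""
    base = os.path.join(data_dir, relfilepath)
    for seg in range(max_segments):
        path = base if seg == 0 else '{0}.{1}'.format(base, seg)
        if not os.path.exists(path):
            break
        yield path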
'no_access_dir'): + shutil.rmtree(self.no_access_dir, ignore_errors=True) # @unittest.skip("skip") # @unittest.expectedFailure - def test_success(self): + def test_basic_success(self): """Success normal init""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - self.init_pb(backup_dir) - self.assertEqual( - dir_files(backup_dir), - ['backups', 'wal'] - ) - self.add_instance(backup_dir, 'node', node) - self.assertIn( - "INFO: Instance 'node' successfully deleted", - self.del_instance(backup_dir, 'node'), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd)) + instance_name = 'node' + backup_dir = self.backup_dir + node = self.pg_node.make_simple(instance_name) + self.pb.init() + + if backup_dir.is_file_based: + self.assertEqual( + dir_files(backup_dir), + CATALOG_DIRS + ) + + for subdir in CATALOG_DIRS: + dirname = os.path.join(backup_dir, subdir) + self.assertEqual(DIR_PERMISSION, stat.S_IMODE(os.stat(dirname).st_mode)) + + self.pb.add_instance(instance_name, node) + self.assertMessage(self.pb.del_instance(instance_name), + contains=f"INFO: Instance '{instance_name}' successfully deleted") # Show non-existing instance - try: - self.show_pb(backup_dir, 'node') - self.assertEqual(1, 0, 'Expecting Error due to show of non-existing instance. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' does not exist in this backup catalog", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) + error_result = self.pb.show(instance_name, as_text=True, expect_error=True) + self.assertMessage(error_result, + contains=f"ERROR: Instance '{instance_name}' does not exist in this backup catalog") # Delete non-existing instance - try: - self.del_instance(backup_dir, 'node1') - self.assertEqual(1, 0, 'Expecting Error due to delete of non-existing instance. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node1' does not exist in this backup catalog", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) + error_result = self.pb.del_instance('node1', expect_error=True) + self.assertMessage(error_result, + contains="ERROR: Instance 'node1' does not exist in this backup catalog") # Add instance without pgdata - try: - self.run_pb([ - "add-instance", - "--instance=node1", - "-B", backup_dir - ]) - self.assertEqual(1, 0, 'Expecting Error due to adding instance without pgdata. 
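# test_basic_success above asserts the catalog layout produced by `init`: the
# 'backups' and 'wal' subdirectories with mode 0700 (on non-Windows).  The same
# check outside the test framework, as a hedged sketch with a placeholder binary
# path:
import os
import stat
import subprocess

def init_and_check_catalog(pg_probackup_bin, backup_dir):
    subprocess.run([pg_probackup_bin, 'init', '-B', backup_dir], check=True)
    for subdir in ('backups', 'wal'):
        path = os.path.join(backup_dir, subdir)
        assert os.path.isdir(path), 'missing catalog dir: {0}'.format(path)
        assert stat.S_IMODE(os.stat(path).st_mode) == 0o700, path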
Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: No postgres data directory specified.\n" - "Please specify it either using environment variable PGDATA or\ncommand line option --pgdata (-D)", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) + error_result = self.pb.run([ + "add-instance", + "--instance=node1", + ], expect_error=True) + self.assertMessage(error_result, + contains="No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)") # @unittest.skip("skip") - def test_already_exist(self): + def test_basic_already_exist(self): """Failure with backup catalog already existed""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - self.init_pb(backup_dir) - try: - self.show_pb(backup_dir, 'node') - self.assertEqual(1, 0, 'Expecting Error due to initialization in non-empty directory. Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' does not exist in this backup catalog", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + instance_name = 'node' + self.pg_node.make_simple(instance_name, checksum=False) + self.pb.init() + error_result = self.pb.show(instance_name, expect_error=True) + self.assertMessage(error_result, contains=f"ERROR: Instance '{instance_name}' " + f"does not exist in this backup catalog") # @unittest.skip("skip") - def test_abs_path(self): + def test_basic_abs_path(self): """failure with backup catalog should be given as absolute path""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - try: - self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)]) - self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. 
Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: -B, --backup-path must be an absolute path", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pg_node.make_simple('node') + error_result = self.pb.run(["init", "-B", "../backups_fake"], expect_error=True, use_backup_dir=None) + self.assertMessage(error_result, regex="backup-path must be an absolute path") # @unittest.skip("skip") # @unittest.expectedFailure - def test_add_instance_idempotence(self): + def test_basic_add_instance_idempotence(self): """ https://github.com/postgrespro/pg_probackup/issues/219 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) - self.init_pb(backup_dir) + backup_dir = self.backup_dir + instance_name = 'node' + node = self.pg_node.make_simple(instance_name) + self.pb.init() + + self.pb.add_instance(instance_name, node) + self.remove_one_backup_instance(backup_dir, instance_name) + + self.write_instance_wal(backup_dir, instance_name, '0000', b'') + + error_message = self.pb.add_instance(instance_name, node, expect_error=True) + self.assertMessage(error_message, regex=fr"'{instance_name}'.*WAL.*already exists") + + error_message = self.pb.add_instance(instance_name, node, expect_error=True) + self.assertMessage(error_message, regex=fr"'{instance_name}'.*WAL.*already exists") - self.add_instance(backup_dir, 'node', node) - shutil.rmtree(os.path.join(backup_dir, 'backups', 'node')) + def test_init_backup_catalog_no_access(self): + """ Test pg_probackup init -B backup_dir to a dir with no read access. """ + if not self.backup_dir.is_file_based: + self.skipTest("permission test is not implemented on cloud storage") + backup_dir = self.build_backup_dir('noaccess/backup') + # Store no_access_dir for the teardown in an instance variable + self.no_access_dir = os.path.dirname(backup_dir) + os.makedirs(self.no_access_dir) + os.chmod(self.no_access_dir, stat.S_IREAD) - dir_backups = os.path.join(backup_dir, 'backups', 'node') - dir_wal = os.path.join(backup_dir, 'wal', 'node') + expected = f'ERROR: Cannot open backup catalog directory: Cannot open dir "{backup_dir}": Permission denied' + error_message = self.pb.init(use_backup_dir=backup_dir, expect_error=True) + self.assertMessage(error_message, contains=expected) + def test_init_backup_catalog_no_write(self): + """ Test pg_probackup init -B backup_dir to a dir with no write access. """ + if not self.backup_dir.is_file_based: + self.skipTest("permission test is not implemented on cloud storage") + backup_dir = self.build_backup_dir('noaccess/backup') + # Store no_access_dir for the teardown in an instance variable + self.no_access_dir = os.path.dirname(backup_dir) + os.makedirs(self.no_access_dir) + os.chmod(self.no_access_dir, stat.S_IREAD|stat.S_IEXEC) + + expected = 'ERROR: Can not create backup catalog root directory: Cannot make dir "{0}": Permission denied'.format(backup_dir) + error_message = self.pb.init(use_backup_dir=backup_dir, expect_error=True) + self.assertMessage(error_message, contains=expected) + + def test_init_backup_catalog_no_create(self): + """ Test pg_probackup init -B backup_dir to a dir when backup dir exists but not writeable. 
""" + if not self.backup_dir.is_file_based: + self.skipTest("permission test is not implemented on cloud storage") + os.makedirs(self.backup_dir) + os.chmod(self.backup_dir, stat.S_IREAD|stat.S_IEXEC) + + expected = 'ERROR: Can not create backup catalog data directory: Cannot make dir "{0}": Permission denied'.format(os.path.join(self.backup_dir, 'backups')) + error_message = self.pb.init(expect_error=True) + self.assertMessage(error_message, contains=expected) + + def test_init_backup_catalog_exists_not_empty(self): + """ Test pg_probackup init -B backup_dir which exists and not empty. """ + backup_dir = self.backup_dir + if backup_dir.is_file_based: + os.makedirs(backup_dir) + backup_dir.write_file('somefile.txt', 'hello') + + error_message = self.pb.init(expect_error=True) + self.assertMessage(error_message, contains=f"ERROR: Backup catalog '{backup_dir}' already exists and is not empty") + + def test_init_add_instance_with_special_symbols(self): + """ Test pg_probackup init -B backup_dir which exists and not empty. """ + backup_dir = self.backup_dir + instance_name = 'instance! -_.*(\')&$@=;:+,?\\{^}%`[\"]<>~#|' + node = self.pg_node.make_simple(instance_name) + self.pb.init() + + error_message = self.pb.add_instance(instance_name, node) + self.assertMessage(error_message, regex=fr"'INFO: Instance {instance_name}' successfully initialized") + + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_basic_dry_run_init(self): + """Success init with dry-run option""" + instance_name = 'node' + backup_dir = self.backup_dir + node = self.pg_node.make_simple(instance_name) + self.pb.init(options=['--dry-run']) + #Assertions + self.assertFalse(os.path.exists(backup_dir), + "Directory should not exist: {0}".format(backup_dir)) + + # Check existing backup directory + self.pb.init() + self.pb.init(options=['--dry-run', '--skip-if-exists']) + + # Access check suite if disk mounted as read_only + dir_mode = os.stat(self.test_path).st_mode + os.chmod(self.test_path, 0o500) + + error_message = self.pb.init(options=['--dry-run'], expect_error ='because of changed permissions') try: - self.add_instance(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' WAL archive directory already exists: ", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(self.test_path, dir_mode) + + node.cleanup() + + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_basic_dry_run_add_instance(self): + """ Check --dry-run option for add_instance command""" + instance_name = 'node' + backup_dir = self.backup_dir + node = self.pg_node.make_simple(instance_name) + self.pb.init() + self.pb.add_instance(instance_name, node, options=['--dry-run']) + # Assetions + self.assertFalse(os.listdir(os.path.join(backup_dir, "wal"))) + self.assertFalse(os.listdir(os.path.join(backup_dir, "backups"))) + # Check existing backup directory + self.pb.add_instance(instance_name, node) + self.pb.add_instance(instance_name, node, options=['--dry-run', '--skip-if-exists']) + + # Check access suite - if disk mounted as 
read_only + dir_path = os.path.join(self.test_path, 'backup') + dir_mode = os.stat(dir_path).st_mode + os.chmod(dir_path, 0o500) + + error_message = self.pb.add_instance(instance_name, node, options=['--dry-run'], + expect_error ='because of changed permissions') try: - self.add_instance(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' WAL archive directory already exists: ", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.assertMessage(error_message, contains='ERROR: Check permissions ') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + + node.cleanup() diff --git a/tests/locking_test.py b/tests/locking_test.py index 5367c2610..774ced840 100644 --- a/tests/locking_test.py +++ b/tests/locking_test.py @@ -1,35 +1,38 @@ +import re +import time import unittest import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class +from pg_probackup2.gdb import needs_gdb -class LockingTest(ProbackupTest, unittest.TestCase): +class LockingTest(ProbackupTest): + + def setUp(self): + super().setUp() # @unittest.skip("skip") # @unittest.expectedFailure + @needs_gdb def test_locking_running_validate_1(self): """ make node, take full backup, stop it in the middle run validate, expect it to successfully executed, concurrent RUNNING backup with pid file and active process is legal """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() @@ -37,310 +40,253 @@ def test_locking_running_validate_1(self): gdb.continue_execution_until_break(20) self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) + 'RUNNING', self.pb.show('node')[1]['status']) + + validate_output = self.pb.validate(options=['--log-level-console=LOG']) - validate_output = self.validate_pb( - backup_dir, options=['--log-level-console=LOG']) + backup_id = self.pb.show('node')[1]['id'] - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] + self.assertIn( + "WARNING: Lock waiting timeout reached. 
Deleting lock file", + validate_output, + '\n Unexpected Validate Output 1: {0}\n'.format(repr( + validate_output))) self.assertIn( - "is using backup {0}, and is still running".format(backup_id), + "WARNING: Cannot lock backup {0} directory, skip validation".format(backup_id), validate_output, - '\n Unexpected Validate Output: {0}\n'.format(repr(validate_output))) + '\n Unexpected Validate Output 3: {0}\n'.format(repr( + validate_output))) self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) + 'RUNNING', self.pb.show('node')[1]['status']) # Clean after yourself gdb.kill() + @needs_gdb def test_locking_running_validate_2(self): """ make node, take full backup, stop it in the middle, - kill process so no cleanup is done - pid file is in place, - run validate, expect it to not successfully executed, - RUNNING backup with pid file AND without active pid is legal, - but his status must be changed to ERROR and pid file is deleted + kill process so no cleanup is done, then change lock file timestamp + to expired time, run validate, expect it to not successfully + executed, RUNNING backup with expired lock file is legal, but his + status must be changed to ERROR """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() gdb.continue_execution_until_break(20) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') gdb.continue_execution_until_error() self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because RUNNING backup is no longer active.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "which used backup {0} no longer exists".format( - backup_id) in e.message and - "Backup {0} has status RUNNING, change it " - "to ERROR and skip validation".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 'RUNNING', self.pb.show('node')[1]['status']) + + backup_id = self.pb.show('node')[1]['id'] + + self.expire_locks(backup_dir, 'node') + + self.pb.validate(options=["--log-level-console=VERBOSE"], + expect_error="because RUNNING backup is no longer active") + self.assertMessage(regex=r"Lock \S* has expired") + self.assertMessage(contains=f"Backup {backup_id} has status RUNNING, change it " + "to ERROR and skip validation") + 
self.assertMessage(contains="WARNING: Some backups are not valid") self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) + 'ERROR', self.pb.show('node')[1]['status']) # Clean after yourself gdb.kill() + @needs_gdb def test_locking_running_validate_2_specific_id(self): """ make node, take full backup, stop it in the middle, - kill process so no cleanup is done - pid file is in place, - run validate on this specific backup, - expect it to not successfully executed, - RUNNING backup with pid file AND without active pid is legal, - but his status must be changed to ERROR and pid file is deleted + kill process so no cleanup is done, then change lock file timestamp + to expired time, run validate on this specific backup, expect it to + not successfully executed, RUNNING backup with expired lock file is + legal, but his status must be changed to ERROR """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() gdb.continue_execution_until_break(20) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') gdb.continue_execution_until_error() self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - try: - self.validate_pb(backup_dir, 'node', backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because RUNNING backup is no longer active.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "which used backup {0} no longer exists".format( - backup_id) in e.message and - "Backup {0} has status RUNNING, change it " - "to ERROR and skip validation".format( - backup_id) in e.message and - "ERROR: Backup {0} has status: ERROR".format(backup_id) in - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 'RUNNING', self.pb.show('node')[1]['status']) + + backup_id = self.pb.show('node')[1]['id'] + + self.expire_locks(backup_dir, 'node') + + self.pb.validate('node', backup_id, + options=['--log-level-console=VERBOSE'], + expect_error="because RUNNING backup is no longer active") + self.assertMessage(regex=r"Lock \S* has expired") + self.assertMessage(contains=f"Backup {backup_id} has status RUNNING, change it " + "to ERROR and skip validation") + self.assertMessage(contains=f"ERROR: Backup {backup_id} has status: ERROR") self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) 
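# expire_locks() used in these locking tests is a framework helper that is not
# shown in this diff.  The sketch below is only a guess at the idea (making a
# RUNNING backup's lock look stale so validate treats the backup as abandoned)
# by backdating lock-file timestamps; the real helper may instead rewrite lock
# file contents, so treat the file names and suffixes here as hypothetical.
import os
import time

def expire_locks_by_mtime(instance_backups_dir, older_than_seconds=3600):
    """Backdate '*.pid' / '*.lock' files under an instance's backups directory."""
    expired = time.time() - older_than_seconds
    for root, _dirs, files in os.walk(instance_backups_dir):
        for name in files:
            if name.endswith(('.pid', '.lock')):
                path = os.path.join(root, name)
                os.utime(path, (expired, expired))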
- - try: - self.validate_pb(backup_dir, 'node', backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has status: ERROR".format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Backup {0} has status ERROR. Skip validation".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 'ERROR', self.pb.show('node')[1]['status']) + + self.pb.validate('node', backup_id, + expect_error="because backup has status ERROR") + self.assertMessage(contains=f"ERROR: Backup {backup_id} has status: ERROR") + + self.pb.validate(expect_error="because backup has status ERROR") + self.assertMessage(contains=f"WARNING: Backup {backup_id} has status ERROR. Skip validation") + self.assertMessage(contains="WARNING: Some backups are not valid") # Clean after yourself gdb.kill() + @needs_gdb def test_locking_running_3(self): """ - make node, take full backup, stop it in the middle, - terminate process, delete pid file, - run validate, expect it to not successfully executed, - RUNNING backup without pid file AND without active pid is legal, - his status must be changed to ERROR + make node, take full backup, stop it in the middle, kill process so + no cleanup is done, delete lock file to expired time, run validate, + expect it to not successfully executed, RUNNING backup with expired + lock file is legal, but his status must be changed to ERROR """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() gdb.continue_execution_until_break(20) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') gdb.continue_execution_until_error() self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'RUNNING', self.show_pb(backup_dir, 'node')[1]['status']) - - backup_id = self.show_pb(backup_dir, 'node')[1]['id'] - - os.remove( - os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid')) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because RUNNING backup is no longer active.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "Backup {0} has status RUNNING, change it 
" - "to ERROR and skip validation".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 'RUNNING', self.pb.show('node')[1]['status']) + + backup_id = self.pb.show('node')[1]['id'] + + # delete lock file + self.expire_locks(backup_dir, 'node') + + self.pb.validate( + expect_error="because RUNNING backup is no longer active") + self.assertMessage(contains=f"Backup {backup_id} has status RUNNING, change it " + "to ERROR and skip validation") + self.assertMessage(contains="WARNING: Some backups are not valid") self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node')[1]['status']) + 'ERROR', self.pb.show('node')[1]['status']) # Clean after yourself gdb.kill() + @needs_gdb def test_locking_restore_locked(self): """ make node, take full backup, take two page backups, launch validate on PAGE1 and stop it in the middle, launch restore of PAGE2. - Expect restore to sucseed because read-only locks + Expect restore to succeed because read-only locks do not conflict """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # PAGE1 - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') # PAGE2 - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - gdb = self.validate_pb( - backup_dir, 'node', backup_id=backup_id, gdb=True) + gdb = self.pb.validate('node', backup_id=backup_id, gdb=True) gdb.set_breakpoint('pgBackupValidate') gdb.run_until_break() node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # Clean after yourself gdb.kill() + @needs_gdb def test_concurrent_delete_and_restore(self): """ make node, take full backup, take page backup, @@ -349,26 +295,22 @@ def test_concurrent_delete_and_restore(self): Expect restore to fail because validation of intermediate backup is impossible """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # PAGE1 - restore_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + restore_id = 
self.pb.backup_node('node', node, backup_type='page') - gdb = self.delete_pb( - backup_dir, 'node', backup_id=backup_id, gdb=True) + gdb = self.pb.delete('node', backup_id=backup_id, gdb=True) # gdb.set_breakpoint('pgFileDelete') gdb.set_breakpoint('delete_backup_files') @@ -376,127 +318,100 @@ def test_concurrent_delete_and_restore(self): node.cleanup() - try: - self.restore_node( - backup_dir, 'node', node, options=['--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because restore without whole chain validation " - "is prohibited unless --no-validate provided.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "Backup {0} is used without validation".format( - restore_id) in e.message and - 'is using backup {0}, and is still running'.format( - backup_id) in e.message and - 'ERROR: Cannot lock backup' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, options=['--no-validate'], + expect_error="because restore without whole chain validation " + "is prohibited unless --no-validate provided.") + self.assertMessage(contains=f"Backup {restore_id} is used without validation") + self.assertMessage(contains='WARNING: Lock waiting timeout reached.') + self.assertMessage(contains=f'Cannot lock backup {backup_id} directory') + self.assertMessage(contains='ERROR: Cannot lock backup') # Clean after yourself gdb.kill() + @needs_gdb def test_locking_concurrent_validate_and_backup(self): """ make node, take full backup, launch validate and stop it in the middle, take page backup. Expect PAGE backup to be successfully executed """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # PAGE2 - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') - gdb = self.validate_pb( - backup_dir, 'node', backup_id=backup_id, gdb=True) + gdb = self.pb.validate('node', backup_id=backup_id, gdb=True) gdb.set_breakpoint('pgBackupValidate') gdb.run_until_break() # This PAGE backup is expected to be successfull - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Clean after yourself gdb.kill() - def test_locking_concurren_restore_and_delete(self): + @needs_gdb + def test_locking_concurrent_restore_and_delete(self): """ make node, take full backup, launch restore and stop it in the middle, delete full backup. Expect it to fail. 
""" - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) node.cleanup() - gdb = self.restore_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.restore_node('node', node=node, gdb=True) gdb.set_breakpoint('create_data_directories') gdb.run_until_break() - try: - self.delete_pb(backup_dir, 'node', full_id) - self.assertEqual( - 1, 0, - "Expecting Error because backup is locked\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Cannot lock backup {0} directory".format(full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.delete('node', full_id, + expect_error="because backup is locked") + self.assertMessage(contains=f"ERROR: Cannot lock backup {full_id} directory") # Clean after yourself gdb.kill() + @unittest.skipIf(not fs_backup_class.is_file_based, "os.rename is not implemented in a cloud") def test_backup_directory_name(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - full_id_1 = self.backup_node(backup_dir, 'node', node) - page_id_1 = self.backup_node(backup_dir, 'node', node, backup_type='page') + full_id_1 = self.pb.backup_node('node', node) + page_id_1 = self.pb.backup_node('node', node, backup_type='page') - full_id_2 = self.backup_node(backup_dir, 'node', node) - page_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page') + full_id_2 = self.pb.backup_node('node', node) + page_id_2 = self.pb.backup_node('node', node, backup_type='page') node.cleanup() @@ -505,125 +420,489 @@ def test_backup_directory_name(self): os.rename(old_path, new_path) - # This PAGE backup is expected to be successfull - self.show_pb(backup_dir, 'node', full_id_1) - - self.validate_pb(backup_dir) - self.validate_pb(backup_dir, 'node') - self.validate_pb(backup_dir, 'node', full_id_1) + self.pb.show('node', full_id_1) - self.restore_node(backup_dir, 'node', node, backup_id=full_id_1) + self.pb.validate() + self.pb.validate('node') + self.pb.validate('node', full_id_1) - self.delete_pb(backup_dir, 'node', full_id_1) + self.pb.restore_node('node', node=node, backup_id=full_id_1) - old_path = os.path.join(backup_dir, 'backups', 'node', full_id_2) - new_path = os.path.join(backup_dir, 'backups', 'node', 'hello_kitty') + self.pb.delete('node', full_id_1) - self.set_backup( - backup_dir, 'node', full_id_2, options=['--note=hello']) + self.pb.set_backup('node', 
full_id_2, options=['--note=hello']) - self.merge_backup(backup_dir, 'node', page_id_2, options=["-j", "4"]) + self.pb.merge_backup('node', page_id_2, options=["-j", "4"]) self.assertNotIn( 'note', - self.show_pb(backup_dir, 'node', page_id_2)) + self.pb.show('node', page_id_2)) # Clean after yourself - def test_empty_lock_file(self): + + @needs_gdb + def test_locks_delete(self): """ - https://github.com/postgrespro/pg_probackup/issues/308 + Make sure that shared and exclusive locks are deleted + after end of pg_probackup operations """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Fill with data - node.pgbench_init(scale=100) + node.pgbench_init(scale=1) # FULL - backup_id = self.backup_node(backup_dir, 'node', node) + gdb = self.pb.backup_node('node', node, gdb=True) - lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') - with open(lockfile, "w+") as f: - f.truncate() + gdb.set_breakpoint('do_backup_pg') + gdb.run_until_break() - out = self.validate_pb(backup_dir, 'node', backup_id) + backup_id = self.pb.show('node')[0]['id'] - self.assertIn( - "Waiting 30 seconds on empty exclusive lock for backup", out) - -# lockfile = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') -# with open(lockfile, "w+") as f: -# f.truncate() -# -# p1 = self.validate_pb(backup_dir, 'node', backup_id, asynchronous=True, -# options=['--log-level-file=LOG', '--log-filename=validate.log']) -# sleep(3) -# p2 = self.delete_pb(backup_dir, 'node', backup_id, asynchronous=True, -# options=['--log-level-file=LOG', '--log-filename=delete.log']) -# -# p1.wait() -# p2.wait() - - def test_shared_lock(self): - """ - Make sure that shared lock leaves no files with pids - """ - self._check_gdb_flag_or_skip_test() + locks = self.get_locks(backup_dir, 'node') + + self.assertEqual(len(locks), 1, + f"There should be just 1 lock, got {locks}") + self.assertTrue(locks[0].startswith(backup_id+"_"), + f"Lock should be for backup {backup_id}, got {locks[0]}") + self.assertTrue(locks[0].endswith("_w"), + f"Lock should be exclusive got {locks[0]}") + + gdb.continue_execution_until_exit() + + locks = self.get_locks(backup_dir, 'node') + self.assertFalse(locks, f"Locks should not exist, got {locks}") + + gdb = self.pb.validate('node', backup_id, gdb=True) + + gdb.set_breakpoint('validate_one_page') + gdb.run_until_break() + + locks = self.get_locks(backup_dir, 'node') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + self.assertEqual(len(locks), 1, + f"There should be just 1 lock, got {locks}") + self.assertTrue(locks[0].startswith(backup_id+"_"), + f"Lock should be for backup {backup_id}, got {locks[0]}") + self.assertTrue(locks[0].endswith("_r"), + f"Lock should be shared got {locks[0]}") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + gdb.continue_execution_until_exit() + + locks = 
self.get_locks(backup_dir, 'node') + self.assertFalse(locks, f"Locks should not exist, got {locks}") + + + @needs_gdb + def test_concurrent_merge_1(self): + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Fill with data node.pgbench_init(scale=1) - # FULL - backup_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node, backup_type="full") - lockfile_excl = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid') - lockfile_shr = os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup_ro.pid') + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() - self.validate_pb(backup_dir, 'node', backup_id) + self.pb.backup_node('node', node, backup_type="page") - self.assertFalse( - os.path.exists(lockfile_excl), - "File should not exist: {0}".format(lockfile_excl)) + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() - self.assertFalse( - os.path.exists(lockfile_shr), - "File should not exist: {0}".format(lockfile_shr)) - - gdb = self.validate_pb(backup_dir, 'node', backup_id, gdb=True) + prev_id = self.pb.backup_node('node', node, backup_type="page") - gdb.set_breakpoint('validate_one_page') + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + last_id = self.pb.backup_node('node', node, backup_type="page") + + gdb = self.pb.merge_backup('node', prev_id, gdb=True) + gdb.set_breakpoint("merge_chain") gdb.run_until_break() - gdb.kill() - self.assertTrue( - os.path.exists(lockfile_shr), - "File should exist: {0}".format(lockfile_shr)) - - self.validate_pb(backup_dir, 'node', backup_id) + self.pb.merge_backup('node', last_id, + expect_error="because of concurrent merge") + self.assertMessage(contains=f"ERROR: Cannot lock backup {full_id}") + + @needs_gdb + def test_concurrent_merge_2(self): + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=1) + + full_id = self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + prev_id = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + last_id = self.pb.backup_node('node', node, backup_type="page") + + gdb = self.pb.merge_backup('node', prev_id, gdb=True) + # pthread_create will be called after state changed to merging + gdb.set_breakpoint("merge_files") + gdb.run_until_break() + + print(self.pb.show('node', as_text=True, as_json=False)) + self.assertEqual( + 'MERGING', self.pb.show('node')[0]['status']) + self.assertEqual( + 'MERGING', self.pb.show('node')[-2]['status']) + + self.pb.merge_backup('node', last_id, + expect_error="because of concurrent merge") + self.assertMessage(contains=f"ERROR: Full backup {full_id} has unfinished merge") + + @needs_gdb + def test_concurrent_merge_and_backup_1(self): + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + 
node.pgbench_init(scale=1) + + full_id = self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") - self.assertFalse( - os.path.exists(lockfile_excl), - "File should not exist: {0}".format(lockfile_excl)) + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() - self.assertFalse( - os.path.exists(lockfile_shr), - "File should not exist: {0}".format(lockfile_shr)) + prev_id = self.pb.backup_node('node', node, backup_type="page") + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + gdb = self.pb.merge_backup('node', prev_id, gdb=True) + gdb.set_breakpoint("merge_chain") + gdb.run_until_break() + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + @needs_gdb + def test_concurrent_merge_and_backup_2(self): + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=1) + + full_id = self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + prev_id = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + gdb = self.pb.merge_backup('node', prev_id, gdb=True) + # pthread_create will be called after state changed to merging + gdb.set_breakpoint("merge_files") + gdb.run_until_break() + + pgbench = node.pgbench(options=['-t', '2000', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page", + expect_error="because of concurrent merge") + self.assertMessage( + contains="WARNING: Valid full backup on current timeline 1 " + "is not found, trying to look up on previous timelines") + self.assertMessage( + contains="WARNING: Cannot find valid backup on previous timelines") + self.assertMessage( + contains="ERROR: Create new full backup before an incremental one") + + + @needs_gdb + def test_locks_race_condition(self): + """ + Make locks race condition happen and check that pg_probackup + detected it and retried taking new lock. + + Run full backup. Set breakpoint on create_lock_file function, + stop there. Then run 'pg_probackup delete' command on current full + backup, stop it after taking a lock and before deleting its lock + file. Then continue taking full backup -- it must encounter race + condition because lock file of delete operation appeared between + two checks of /locks directory for concurrent locks + (scan_locks_directory function). 
+ """ + + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # start full backup + gdb_backup = self.pb.backup_node('node', node, + options=['--log-level-console=LOG'], gdb=True) + + gdb_backup.set_breakpoint('create_lock_file') + gdb_backup.run_until_break() + + backup_id = self.pb.show('node')[0]['id'] + + gdb_delete = self.pb.delete('node', backup_id, + options=['--log-level-console=LOG'], gdb=True) + + gdb_delete.set_breakpoint('create_lock_file') + gdb_delete.run_until_break() + + # we scanned locks directory and found no concurrent locks + # so we proceed to the second scan for race condition check + gdb_backup.set_breakpoint('scan_locks_directory') + gdb_delete.set_breakpoint('scan_locks_directory') + + # we create lock files with no contents + gdb_backup.continue_execution_until_break() + gdb_delete.continue_execution_until_break() + + # check that both exclusive lock files were created with empty contents + locks_list = self.get_locks(backup_dir, 'node') + locks_list_race = locks_list + + self.assertEqual(len(locks_list), 2) + + self.assertFalse(self.read_lock(backup_dir, 'node', locks_list[0])) + self.assertFalse(self.read_lock(backup_dir, 'node', locks_list[1])) + + gdb_backup.set_breakpoint('pioRemove__do') + gdb_delete.set_breakpoint('pioRemove__do') + + # we wait for message about race condition and stop right before we + # delete concurrent lock files to make both processes encounter race + # condition + gdb_backup.continue_execution_until_break() + self.assertIn("Lock race condition detected, taking lock attempt 1 " + "failed", gdb_backup.output) + gdb_delete.continue_execution_until_break() + self.assertIn("Lock race condition detected, taking lock attempt 1 " + "failed", gdb_delete.output) + + # run until next breakpoint ('scan_locks_directory') so old lock + # files will be deleted + gdb_backup.continue_execution_until_break() + gdb_delete.continue_execution_until_break() + + locks_list = self.get_locks(backup_dir, 'node') + self.assertFalse(locks_list) + + # continue backup execution until 'unlock_backup' at-exit util + gdb_backup.remove_all_breakpoints() + gdb_backup.set_breakpoint('unlock_backup') + gdb_backup.continue_execution_until_break() + + locks_list = self.get_locks(backup_dir, 'node') + self.assertTrue(locks_list, + f"Expecting at least 1 lock, got no") + self.assertLessEqual(len(locks_list), 2, + f"Expecting 1 or 2 locks, got {locks_list}") + if len(locks_list) == 2: + id1 = "_".join(locks_list[0].split("_", 2)[:2]) + id2 = "_".join(locks_list[1].split("_", 2)[:2]) + self.assertEqual(id1, id2) + + lock_backup = locks_list[0] + self.assertIn(f"{lock_backup} was taken", + gdb_backup.output) + self.assertFalse(self.read_lock(backup_dir, 'node', lock_backup)) + self.assertNotIn(lock_backup, locks_list_race) + + # make sure that delete command keeps waiting for backup unlocking + gdb_delete.remove_all_breakpoints() + gdb_delete.set_breakpoint('wait_for_conflicting_locks') + gdb_delete.continue_execution_until_break() + + locks_list = self.get_locks(backup_dir, 'node') + self.assertEqual(len(locks_list), 2) + self.assertNotEqual(locks_list[0], locks_list[1]) + lock_delete = (set(locks_list) - {lock_backup}).pop() + + self.assertNotIn(lock_delete, locks_list_race) + self.assertTrue(self.read_lock(backup_dir, 'node', lock_delete)) + + gdb_delete.remove_all_breakpoints() + gdb_delete.set_breakpoint('sleep') + 
gdb_delete.continue_execution_until_break() + gdb_delete.remove_all_breakpoints() + + self.assertIn(f"Waiting to take lock for backup {backup_id}", + gdb_delete.output) + + # continue all commands + gdb_backup.continue_execution_until_exit() + gdb_delete.continue_execution_until_exit() + + # make sure that locks were deleted + locks_list = self.get_locks(backup_dir, 'node') + self.assertFalse(locks_list) + + + @needs_gdb + def test_expired_locks_delete(self): + """ + check that if locks (shared or exclusive) have timestamp older than + ( - LOCK_LIFETIME) they are deleted by running + pg_probackup process + """ + + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + gdb = self.pb.backup_node('node', node, gdb=True) + + gdb.set_breakpoint('backup_non_data_file') + gdb.run_until_break() + + gdb.continue_execution_until_break(20) + + gdb.signal('SIGKILL') + gdb.continue_execution_until_error() + + self.assertEqual( + 'RUNNING', self.pb.show('node')[0]['status']) + + self.expire_locks(backup_dir, 'node', seconds=3600) + + stale_locks_list = self.get_locks(backup_dir, 'node') + self.assertEqual(len(stale_locks_list), 1) + + backup_id = self.pb.show('node')[0]['id'] + + gdb = self.pb.validate('node', backup_id, gdb=True, + options=['--log-level-console=LOG']) + gdb.set_breakpoint('pgBackupValidate') + gdb.run_until_break() + + self.assertRegex(gdb.output, + r"Expired lock file \S* is older than 180 seconds, deleting") + + new_locks_list = self.get_locks(backup_dir, 'node') + self.assertEqual(len(new_locks_list), 1) + self.assertFalse(set(stale_locks_list) & set(new_locks_list)) + + gdb.continue_execution_until_exit() + + self.assertEqual( + 'ERROR', self.pb.show('node')[0]['status']) + + + @needs_gdb + def test_locks_renovate_time(self): + """ + check that daemon thread renovates locks (shared or exclusive) + timestamps when they are about to expire + """ + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + + node.slow_start() + + gdb = self.pb.backup_node('node', node, gdb=True, options=['-j', '1', + '--write-rate-limit=1', + '--log-level-file=LOG']) + + # we need to stop just main thread + gdb._execute("set pagination off") + gdb._execute("set non-stop on") + gdb.set_breakpoint('do_backup') + gdb.run_until_break() + gdb._execute("set val_LOCK_RENOVATE_TIME=2") + gdb.set_breakpoint('do_backup_pg') + gdb.continue_execution_until_break() + + self.assertEqual( + 'RUNNING', self.pb.show('node')[0]['status']) + + locks_1 = self.get_locks(backup_dir, 'node') + self.assertLessEqual(len(locks_1), 2) + lock_id = '_'.join(locks_1[0].split('_', 2)[:2]) + + for attempt in range(25): + time.sleep(4) + locks_2 = self.get_locks(backup_dir, 'node') + if set(locks_1) != set(locks_2) and len(locks_2) == 2: + new = (set(locks_2) - set(locks_1)).pop() + self.assertTrue(new.startswith(lock_id)) + break + else: + self.fail("locks didn't renovate in 100 seconds") + + + gdb.remove_all_breakpoints() + gdb.continue_execution_until_exit() + + self.assertEqual( + 'OK', self.pb.show('node')[0]['status']) \ No newline at end of file diff --git a/tests/logging_test.py b/tests/logging_test.py index c5cdfa344..85e646c1e 100644 --- a/tests/logging_test.py +++ b/tests/logging_test.py @@ -1,38 +1,33 @@ -import unittest import os -from .helpers.ptrack_helpers import ProbackupTest, 
ProbackupException +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb import datetime -class LogTest(ProbackupTest, unittest.TestCase): +class LogTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-2154 + @needs_gdb def test_log_rotation(self): """ """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['--log-rotation-age=1s', '--log-rotation-size=1MB']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['--stream', '--log-level-file=verbose']) - gdb = self.backup_node( - backup_dir, 'node', node, + gdb = self.pb.backup_node('node', node, options=['--stream', '--log-level-file=verbose'], gdb=True) gdb.set_breakpoint('open_logfile') @@ -40,22 +35,18 @@ def test_log_rotation(self): gdb.continue_execution_until_exit() def test_log_filename_strftime(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['--log-rotation-age=1d']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=VERBOSE', @@ -63,37 +54,30 @@ def test_log_filename_strftime(self): day_of_week = datetime.datetime.today().strftime("%a") - path = os.path.join( - backup_dir, 'log', 'pg_probackup-{0}.log'.format(day_of_week)) + path = os.path.join(self.pb_log_path, 'pg_probackup-{0}.log'.format(day_of_week)) self.assertTrue(os.path.isfile(path)) def test_truncate_rotation_file(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['--log-rotation-age=1d']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=VERBOSE']) - rotation_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log.rotation') + rotation_file_path = os.path.join(self.pb_log_path, 'pg_probackup.log.rotation') - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') + log_file_path = os.path.join(self.pb_log_path, 'pg_probackup.log') 
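The truncate/unlink/garbage rotation tests in this hunk all pin down the same behaviour: when pg_probackup.log.rotation cannot be read, the next run emits a warning (the truncate case asserts 'WARNING: cannot read creation timestamp from rotation file') and the rotation file is present again afterwards (asserted in the unlink case). A minimal Python sketch of that decision, assuming the rotation file simply stores a creation timestamp; the helper name decide_rotation and the on-disk format are illustrative assumptions, not pg_probackup's actual C implementation:

import os
import time

def decide_rotation(log_path, rotation_path, rotation_age_s=None, rotation_size=None):
    # Returns (rotate, warning): whether to start a new log file and any
    # warning to emit about the rotation bookkeeping file.
    try:
        with open(rotation_path) as f:
            created = float(f.read().strip())
    except (OSError, ValueError):
        # Missing, truncated or garbage .rotation file: warn and write a
        # fresh timestamp (the warning text is taken from the test above;
        # everything else here is an assumption).
        return True, "cannot read creation timestamp from rotation file"
    if rotation_age_s is not None and time.time() - created >= rotation_age_s:
        return True, None
    if (rotation_size is not None and os.path.exists(log_path)
            and os.stat(log_path).st_size >= rotation_size):
        return True, None
    return False, None

For example, decide_rotation('pg_probackup.log', 'pg_probackup.log.rotation', rotation_age_s=86400) would mirror the --log-rotation-age=1d setting used by these tests; the age/size branches are inferred from the --log-rotation-age and --log-rotation-size options exercised elsewhere in this file, since the tests themselves only assert the warning text and the file's re-creation.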
log_file_size = os.stat(log_file_path).st_size @@ -105,8 +89,7 @@ def test_truncate_rotation_file(self): f.flush() f.close - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=LOG'], @@ -121,8 +104,7 @@ def test_truncate_rotation_file(self): 'WARNING: cannot read creation timestamp from rotation file', output) - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=LOG'], @@ -140,31 +122,25 @@ def test_truncate_rotation_file(self): self.assertTrue(os.path.isfile(rotation_file_path)) def test_unlink_rotation_file(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['--log-rotation-age=1d']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=VERBOSE']) - rotation_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log.rotation') + rotation_file_path = os.path.join(self.pb_log_path, 'pg_probackup.log.rotation') - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') + log_file_path = os.path.join(self.pb_log_path, 'pg_probackup.log') log_file_size = os.stat(log_file_path).st_size @@ -173,8 +149,7 @@ def test_unlink_rotation_file(self): # unlink .rotation file os.unlink(rotation_file_path) - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=LOG'], @@ -191,8 +166,7 @@ def test_unlink_rotation_file(self): self.assertTrue(os.path.isfile(rotation_file_path)) - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=VERBOSE'], @@ -208,31 +182,24 @@ def test_unlink_rotation_file(self): log_file_size) def test_garbage_in_rotation_file(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', + self.pb.set_config('node', options=['--log-rotation-age=1d']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=VERBOSE']) - rotation_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log.rotation') + rotation_file_path = os.path.join(self.pb_log_path, 'pg_probackup.log.rotation') - log_file_path = os.path.join( - backup_dir, 'log', 'pg_probackup.log') + log_file_path = os.path.join(self.pb_log_path, 'pg_probackup.log') log_file_size = os.stat(log_file_path).st_size @@ -241,8 +208,7 @@ def test_garbage_in_rotation_file(self): # mangle 
.rotation file with open(rotation_file_path, "w+b", 0) as f: f.write(b"blah") - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=LOG'], @@ -263,8 +229,7 @@ def test_garbage_in_rotation_file(self): self.assertTrue(os.path.isfile(rotation_file_path)) - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=[ '--stream', '--log-level-file=LOG'], @@ -280,28 +245,24 @@ def test_garbage_in_rotation_file(self): log_file_size) def test_issue_274(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.backup_node(backup_dir, 'node', node, options=['--stream']) - self.restore_node(backup_dir, 'node', replica) + self.pb.backup_node('node', node, options=['--stream']) + self.pb.restore_node('node', node=replica) # Settings for Replica self.set_replica(node, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) + self.pb.set_archiving('node', replica, replica=True) + replica.set_auto_conf({'port': replica.port}) replica.slow_start(replica=True) @@ -311,28 +272,16 @@ def test_issue_274(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,45600) i") - log_dir = os.path.join(backup_dir, "somedir") + log_dir = os.path.join(self.test_path, "somedir") - try: - self.backup_node( - backup_dir, 'node', replica, backup_type='page', + self.pb.backup_node('node', replica, backup_type='page', options=[ '--log-level-console=verbose', '--log-level-file=verbose', '--log-directory={0}'.format(log_dir), '-j1', '--log-filename=somelog.txt', '--archive-timeout=5s', - '--no-validate', '--log-rotation-size=100KB']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of archiving timeout" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: WAL segment', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + '--no-validate', '--log-rotation-size=100KB'], + expect_error="because of archiving timeout") + self.assertMessage(contains='ERROR: WAL segment') log_file_path = os.path.join( log_dir, 'somelog.txt') diff --git a/tests/merge_test.py b/tests/merge_test.py index a9bc6fe68..e67f446cc 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -2,34 +2,40 @@ import unittest import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +from .compression_test import have_alg +from .helpers.ptrack_helpers import ProbackupTest +from .helpers.ptrack_helpers import fs_backup_class +from .helpers.ptrack_helpers import base36enc, base36dec +from pg_probackup2.gdb import needs_gdb from testgres import 
QueryException import shutil from datetime import datetime, timedelta import time import subprocess -class MergeTest(ProbackupTest, unittest.TestCase): - def test_basic_merge_full_page(self): +class MergeTest(ProbackupTest): + + def test_basic_merge_full_2page(self): """ - Test MERGE command, it merges FULL backup with target PAGE backups + 1. Full backup -> fill data + 2. First Page backup -> fill data + 3. Second Page backup + 4. Merge 2 "Page" backups + Restore and compare """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=["--data-checksums"]) + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) node.slow_start() # Do full backup - self.backup_node(backup_dir, "node", node, options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[0] + self.pb.backup_node("node", node, options=['--compress']) + show_backup = self.pb.show("node")[0] self.assertEqual(show_backup["status"], "OK") self.assertEqual(show_backup["backup-mode"], "FULL") @@ -42,8 +48,8 @@ def test_basic_merge_full_page(self): conn.commit() # Do first page backup - self.backup_node(backup_dir, "node", node, backup_type="page", options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[1] + self.pb.backup_node("node", node, backup_type="page", options=['--compress']) + show_backup = self.pb.show("node")[1] # sanity check self.assertEqual(show_backup["status"], "OK") @@ -57,10 +63,9 @@ def test_basic_merge_full_page(self): conn.commit() # Do second page backup - self.backup_node( - backup_dir, "node", node, + self.pb.backup_node("node", node, backup_type="page", options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[2] + show_backup = self.pb.show("node")[2] page_id = show_backup["id"] if self.paranoia: @@ -71,9 +76,11 @@ def test_basic_merge_full_page(self): self.assertEqual(show_backup["backup-mode"], "PAGE") # Merge all backups - self.merge_backup(backup_dir, "node", page_id, - options=["-j", "4"]) - show_backups = self.show_pb(backup_dir, "node") + output = self.pb.merge_backup("node", page_id, + options=["-j", "4"]) + self.assertNotIn("WARNING", output) + + show_backups = self.pb.show("node") # sanity check self.assertEqual(len(show_backups), 1) @@ -82,7 +89,7 @@ def test_basic_merge_full_page(self): # Drop node and restore it node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # Check physical correctness if self.paranoia: @@ -96,88 +103,40 @@ def test_basic_merge_full_page(self): count2 = node.execute("postgres", "select count(*) from test") self.assertEqual(count1, count2) - def test_merge_compressed_backups(self): + @unittest.skipIf(not (have_alg('lz4') and have_alg('zstd')), + "pg_probackup is not compiled with lz4 or zstd support") + def test_merge_compressed_delta_page_ptrack(self): """ - Test MERGE command with compressed backups + 1. Full compressed [zlib, 3] backup -> change data + 2. Delta compressed [pglz, 5] -> change data + 3. Page compressed [lz4, 9] -> change data + 4. Ptrack compressed [zstd, default] + 5. 
Merge all backups in one + Restore and compare """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=["--data-checksums"]) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Do full compressed backup - self.backup_node(backup_dir, "node", node, options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Fill with data - with node.connect() as conn: - conn.execute("create table test (id int)") - conn.execute( - "insert into test select i from generate_series(1,10) s(i)") - count1 = conn.execute("select count(*) from test") - conn.commit() - - # Do compressed page backup - self.backup_node( - backup_dir, "node", node, backup_type="page", options=['--compress']) - show_backup = self.show_pb(backup_dir, "node")[1] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id, options=['-j2']) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True) - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) node.slow_start() - # Check restored node - count2 = node.execute("postgres", "select count(*) from test") - self.assertEqual(count1, count2) - - # Clean after yourself - node.cleanup() - - def test_merge_compressed_backups_1(self): - """ - Test MERGE command with compressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"]) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # Fill with data node.pgbench_init(scale=10) # Do compressed FULL backup - self.backup_node(backup_dir, "node", node, options=['--compress', '--stream']) - show_backup = self.show_pb(backup_dir, "node")[0] + self.pb.backup_node("node", node, options=['--stream', + '--compress-level', '3', + '--compress-algorithm', 'zlib']) + show_backup = self.pb.show("node")[0] self.assertEqual(show_backup["status"], "OK") self.assertEqual(show_backup["backup-mode"], "FULL") @@ -187,37 +146,56 @@ def test_merge_compressed_backups_1(self): pgbench.wait() # Do compressed DELTA backup - self.backup_node( - backup_dir, "node", node, - backup_type="delta", options=['--compress', '--stream']) + self.pb.backup_node("node", node, + backup_type="delta", options=['--stream', + '--compress-level', '5', + '--compress-algorithm', 'pglz']) # Change data pgbench = 
node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() # Do compressed PAGE backup - self.backup_node( - backup_dir, "node", node, backup_type="page", options=['--compress']) + self.pb.backup_node("node", node, backup_type="page", options=['--compress-level', '9', + '--compress-algorithm', 'lz4']) + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed PTRACK backup + self.pb.backup_node("node", node, backup_type='ptrack', options=['--compress-algorithm', 'zstd']) pgdata = self.pgdata_content(node.data_dir) - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") + + self.assertEqual(show_backup[3]["status"], "OK") + self.assertEqual(show_backup[3]["backup-mode"], "PTRACK") + + ptrack_id = show_backup[3]["id"] # Merge all backups - self.merge_backup(backup_dir, "node", page_id, options=['-j2']) - show_backups = self.show_pb(backup_dir, "node") + self.pb.merge_backup("node", ptrack_id, options=['-j2']) + show_backups = self.pb.show("node") + # Check number of backups and status self.assertEqual(len(show_backups), 1) self.assertEqual(show_backups[0]["status"], "OK") self.assertEqual(show_backups[0]["backup-mode"], "FULL") # Drop node and restore it node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -225,30 +203,38 @@ def test_merge_compressed_backups_1(self): # Clean after yourself node.cleanup() - def test_merge_compressed_and_uncompressed_backups(self): + def test_merge_uncompressed_ptrack_page_delta(self): """ - Test MERGE command with compressed and uncompressed backups + 1. Full uncompressed backup -> change data + 2. uncompressed Ptrack -> change data + 3. uncompressed Page -> change data + 4. uncompressed Delta + 5. 
Merge all backups in one + Restore and compare """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + backup_dir = self.backup_dir # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - ) + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) node.slow_start() + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + # Fill with data node.pgbench_init(scale=10) - # Do compressed FULL backup - self.backup_node(backup_dir, "node", node, options=[ - '--compress-algorithm=zlib', '--stream']) - show_backup = self.show_pb(backup_dir, "node")[0] + # Do uncompressed FULL backup + self.pb.backup_node("node", node, options=['--stream']) + show_backup = self.pb.show("node")[0] self.assertEqual(show_backup["status"], "OK") self.assertEqual(show_backup["backup-mode"], "FULL") @@ -257,109 +243,54 @@ def test_merge_compressed_and_uncompressed_backups(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - # Do compressed DELTA backup - self.backup_node( - backup_dir, "node", node, backup_type="delta", - options=['--compress', '--stream']) + # Do uncompressed PTRACK backup + self.pb.backup_node("node", node, + backup_type="ptrack", options=['--stream']) # Change data pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() # Do uncompressed PAGE backup - self.backup_node(backup_dir, "node", node, backup_type="page") - - pgdata = self.pgdata_content(node.data_dir) - - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id, options=['-j2']) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - node.cleanup() - - def test_merge_compressed_and_uncompressed_backups_1(self): - """ - Test MERGE command with compressed and uncompressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=5) - - # Do compressed FULL backup - self.backup_node(backup_dir, "node", node, options=[ - '--compress-algorithm=zlib', '--stream']) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - 
self.assertEqual(show_backup["backup-mode"], "FULL") + self.pb.backup_node("node", node, backup_type="page") # Change data - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() # Do uncompressed DELTA backup - self.backup_node( - backup_dir, "node", node, backup_type="delta", - options=['--stream']) + self.pb.backup_node("node", node, backup_type='delta') - # Change data - pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) - pgbench.wait() + pgdata = self.pgdata_content(node.data_dir) - # Do compressed PAGE backup - self.backup_node( - backup_dir, "node", node, backup_type="page", - options=['--compress-algorithm=zlib']) + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") - pgdata = self.pgdata_content(node.data_dir) + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "PTRACK") - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") + self.assertEqual(show_backup[3]["status"], "OK") + self.assertEqual(show_backup[3]["backup-mode"], "DELTA") + + ptrack_id = show_backup[3]["id"] # Merge all backups - self.merge_backup(backup_dir, "node", page_id) - show_backups = self.show_pb(backup_dir, "node") + self.pb.merge_backup("node", ptrack_id, options=['-j2']) + show_backups = self.pb.show("node") + # Check number of backups and status self.assertEqual(len(show_backups), 1) self.assertEqual(show_backups[0]["status"], "OK") self.assertEqual(show_backups[0]["backup-mode"], "FULL") # Drop node and restore it node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -367,73 +298,6 @@ def test_merge_compressed_and_uncompressed_backups_1(self): # Clean after yourself node.cleanup() - def test_merge_compressed_and_uncompressed_backups_2(self): - """ - Test MERGE command with compressed and uncompressed backups - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") - - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=["--data-checksums"], - ) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, "node", node) - self.set_archiving(backup_dir, "node", node) - node.slow_start() - - # Fill with data - node.pgbench_init(scale=20) - - # Do uncompressed FULL backup - self.backup_node(backup_dir, "node", node) - show_backup = self.show_pb(backup_dir, "node")[0] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "FULL") - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do compressed DELTA backup - self.backup_node( - backup_dir, "node", node, backup_type="delta", - options=['--compress-algorithm=zlib', '--stream']) - - # Change data - pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) - pgbench.wait() - - # Do uncompressed PAGE backup - self.backup_node( - 
backup_dir, "node", node, backup_type="page") - - pgdata = self.pgdata_content(node.data_dir) - - show_backup = self.show_pb(backup_dir, "node")[2] - page_id = show_backup["id"] - - self.assertEqual(show_backup["status"], "OK") - self.assertEqual(show_backup["backup-mode"], "PAGE") - - # Merge all backups - self.merge_backup(backup_dir, "node", page_id) - show_backups = self.show_pb(backup_dir, "node") - - self.assertEqual(len(show_backups), 1) - self.assertEqual(show_backups[0]["status"], "OK") - self.assertEqual(show_backups[0]["backup-mode"], "FULL") - - # Drop node and restore it - node.cleanup() - self.restore_node(backup_dir, 'node', node) - - pgdata_restored = self.pgdata_content(node.data_dir) - self.compare_pgdata(pgdata, pgdata_restored) - # @unittest.skip("skip") def test_merge_tablespaces(self): """ @@ -442,15 +306,11 @@ def test_merge_tablespaces(self): tablespace, take page backup, merge it and restore """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - ) + node = self.pg_node.make_simple('node', set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -461,7 +321,7 @@ def test_merge_tablespaces(self): " from generate_series(0,100) i" ) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Create new tablespace self.create_tblspace_in_node(node, 'somedata1') @@ -485,7 +345,7 @@ def test_merge_tablespaces(self): ) # PAGE backup - backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page") + backup_id = self.pb.backup_node('node', node, backup_type="page") pgdata = self.pgdata_content(node.data_dir) @@ -498,10 +358,9 @@ def test_merge_tablespaces(self): ignore_errors=True) node.cleanup() - self.merge_backup(backup_dir, 'node', backup_id) + self.pb.merge_backup('node', backup_id) - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node, options=["-j", "4"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -516,21 +375,18 @@ def test_merge_tablespaces_1(self): drop first tablespace and take delta backup, merge it and restore """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], - ) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", "create table t_heap tablespace somedata as select i as id," @@ -549,7 +405,7 @@ def test_merge_tablespaces_1(self): ) # PAGE backup - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") node.safe_psql( "postgres", @@ -561,8 
+417,7 @@ def test_merge_tablespaces_1(self): ) # DELTA backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="delta") + backup_id = self.pb.backup_node('node', node, backup_type="delta") pgdata = self.pgdata_content(node.data_dir) @@ -575,10 +430,9 @@ def test_merge_tablespaces_1(self): ignore_errors=True) node.cleanup() - self.merge_backup(backup_dir, 'node', backup_id) + self.pb.merge_backup('node', backup_id) - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=["-j", "4"]) pgdata_restored = self.pgdata_content(node.data_dir) @@ -591,20 +445,17 @@ def test_merge_page_truncate(self): take page backup, merge full and page, restore last page backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node_restored.cleanup() node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -621,7 +472,7 @@ def test_merge_page_truncate(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -630,22 +481,20 @@ def test_merge_page_truncate(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - page_id = self.show_pb(backup_dir, "node")[1]["id"] - self.merge_backup(backup_dir, "node", page_id) + page_id = self.pb.show("node")[1]["id"] + self.pb.merge_backup("node", page_id) - self.validate_pb(backup_dir) + self.pb.validate() old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) @@ -655,7 +504,7 @@ def test_merge_page_truncate(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # Logical comparison @@ -671,20 +520,17 @@ def test_merge_delta_truncate(self): take page backup, merge full and page, restore last page backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 
'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node_restored.cleanup() node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -701,7 +547,7 @@ def test_merge_delta_truncate(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -710,22 +556,20 @@ def test_merge_delta_truncate(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - page_id = self.show_pb(backup_dir, "node")[1]["id"] - self.merge_backup(backup_dir, "node", page_id) + page_id = self.pb.show("node")[1]["id"] + self.pb.merge_backup("node", page_id) - self.validate_pb(backup_dir) + self.pb.validate() old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) @@ -735,7 +579,7 @@ def test_merge_delta_truncate(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # Logical comparison @@ -754,16 +598,14 @@ def test_merge_ptrack_truncate(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -784,7 +626,7 @@ def test_merge_ptrack_truncate(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -794,25 +636,22 @@ def test_merge_ptrack_truncate(self): "postgres", "vacuum t_heap") - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack') + ptrack_id = self.pb.backup_node('node', node, backup_type='ptrack') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - self.merge_backup(backup_dir, "node", page_id) + self.pb.merge_backup("node", ptrack_id) - self.validate_pb(backup_dir) + self.pb.validate() - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", 
"{0}={1}".format(old_tablespace, new_tablespace)]) @@ -822,7 +661,7 @@ def test_merge_ptrack_truncate(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # Logical comparison @@ -838,24 +677,21 @@ def test_merge_delta_delete(self): alter tablespace location, take delta backup, merge full and delta, restore database. """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', set_replication=True, pg_options={ 'checkpoint_timeout': '30s', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) node.safe_psql( "postgres", @@ -875,8 +711,7 @@ def test_merge_delta_delete(self): ) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=["--stream"] ) @@ -884,17 +719,15 @@ def test_merge_delta_delete(self): if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - backup_id = self.show_pb(backup_dir, "node")[1]["id"] - self.merge_backup(backup_dir, "node", backup_id, options=["-j", "4"]) + backup_id = self.pb.show("node")[1]["id"] + self.pb.merge_backup("node", backup_id, options=["-j", "4"]) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + node_restored = self.pg_node.make_simple('node_restored' ) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -910,30 +743,27 @@ def test_merge_delta_delete(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") + @needs_gdb def test_continue_failed_merge(self): """ Check that failed MERGE can be continued """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join( - self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -943,8 +773,7 @@ def test_continue_failed_merge(self): ) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta' + self.pb.backup_node('node', node, 
backup_type='delta' ) node.safe_psql( @@ -958,54 +787,54 @@ def test_continue_failed_merge(self): ) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta' + self.pb.backup_node('node', node, backup_type='delta' ) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - backup_id = self.show_pb(backup_dir, "node")[2]["id"] + backup_id = self.pb.show("node")[2]["id"] - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb = self.pb.merge_backup("node", backup_id, gdb=True) gdb.set_breakpoint('backup_non_data_file_internal') gdb.run_until_break() gdb.continue_execution_until_break(5) - gdb._execute('signal SIGKILL') - gdb._execute('detach') + gdb.signal('SIGKILL') + gdb.detach() time.sleep(1) - print(self.show_pb(backup_dir, as_text=True, as_json=False)) + self.expire_locks(backup_dir, 'node') + + print(self.pb.show(as_text=True, as_json=False)) # Try to continue failed MERGE - self.merge_backup(backup_dir, "node", backup_id) + self.pb.merge_backup("node", backup_id) # Drop node and restore it node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # @unittest.skip("skip") + @needs_gdb def test_continue_failed_merge_with_corrupted_delta_backup(self): """ Fail merge via gdb, corrupt DELTA backup, try to continue merge """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -1018,8 +847,7 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') node.safe_psql( "postgres", @@ -1034,73 +862,53 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # DELTA BACKUP - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id_2 = self.pb.backup_node('node', node, backup_type='delta') - backup_id = self.show_pb(backup_dir, "node")[1]["id"] + backup_id = self.pb.show("node")[1]["id"] # Failed MERGE - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb = self.pb.merge_backup("node", backup_id, gdb=True) gdb.set_breakpoint('backup_non_data_file_internal') gdb.run_until_break() gdb.continue_execution_until_break(2) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') # CORRUPT incremental backup # read block from future # block_size + backup_header = 8200 - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id_2, 'database', new_path) - with open(file, 'rb') as f: - f.seek(8200) - block_1 = f.read(8200) - f.close - + file_content2 = self.read_backup_file(backup_dir, 'node', backup_id_2, + f'database/{new_path}')[:16400] # write block from 
future - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', old_path) - with open(file, 'r+b') as f: - f.seek(8200) - f.write(block_1) - f.close + self.corrupt_backup_file(backup_dir, 'node', backup_id, + f'database/{old_path}', + damage=(8200, file_content2[8200:16400])) # Try to continue failed MERGE - try: - print(self.merge_backup(backup_dir, "node", backup_id)) - self.assertEqual( - 1, 0, - "Expecting Error because of incremental backup corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Backup {0} has status CORRUPT, merge is aborted".format( - backup_id) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.merge_backup("node", backup_id, + expect_error="because of incremental backup corruption") + self.assertMessage(contains=f"ERROR: Backup {backup_id} has status CORRUPT, merge is aborted") + @needs_gdb def test_continue_failed_merge_2(self): """ Check that failed MERGE on delete can be continued """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -1109,8 +917,7 @@ def test_continue_failed_merge_2(self): " from generate_series(0,1000) i") # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') node.safe_psql( "postgres", @@ -1121,52 +928,55 @@ def test_continue_failed_merge_2(self): "vacuum t_heap") # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - backup_id = self.show_pb(backup_dir, "node")[2]["id"] + backup_id = self.pb.show("node")[2]["id"] - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb = self.pb.merge_backup("node", backup_id, gdb=True) - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('lock_backup') gdb.run_until_break() gdb._execute('thread apply all bt') + gdb.remove_all_breakpoints() + + gdb.set_breakpoint('pioRemoveDir__do') gdb.continue_execution_until_break(20) gdb._execute('thread apply all bt') - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') - print(self.show_pb(backup_dir, as_text=True, as_json=False)) + print(self.pb.show(as_text=True, as_json=False)) - backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"] + backup_id_deleted = self.pb.show("node")[1]["id"] # TODO check that full backup has meta info is equal to DELETTING # Try to continue failed MERGE - self.merge_backup(backup_dir, "node", backup_id) + self.pb.merge_backup("node", backup_id) + @needs_gdb def test_continue_failed_merge_3(self): """ Check that failed MERGE cannot be continued if intermediate backup is missing. 
""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Create test data @@ -1179,7 +989,7 @@ def test_continue_failed_merge_3(self): ) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # CREATE FEW PAGE BACKUP i = 0 @@ -1203,67 +1013,55 @@ def test_continue_failed_merge_3(self): ) # PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='page' + self.pb.backup_node('node', node, backup_type='page' ) i = i + 1 if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - backup_id_merge = self.show_pb(backup_dir, "node")[2]["id"] - backup_id_delete = self.show_pb(backup_dir, "node")[1]["id"] + backup_id_merge = self.pb.show("node")[2]["id"] + backup_id_delete = self.pb.show("node")[1]["id"] - print(self.show_pb(backup_dir, as_text=True, as_json=False)) + print(self.pb.show(as_text=True, as_json=False)) - gdb = self.merge_backup(backup_dir, "node", backup_id_merge, gdb=True) + gdb = self.pb.merge_backup("node", backup_id_merge, gdb=True) gdb.set_breakpoint('backup_non_data_file_internal') gdb.run_until_break() gdb.continue_execution_until_break(2) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') - print(self.show_pb(backup_dir, as_text=True, as_json=False)) + self.expire_locks(backup_dir, 'node') + + print(self.pb.show(as_text=True, as_json=False)) # print(os.path.join(backup_dir, "backups", "node", backup_id_delete)) # DELETE PAGE1 - shutil.rmtree( - os.path.join(backup_dir, "backups", "node", backup_id_delete)) + self.remove_one_backup(backup_dir, 'node', backup_id_delete) # Try to continue failed MERGE - try: - self.merge_backup(backup_dir, "node", backup_id_merge) - self.assertEqual( - 1, 0, - "Expecting Error because of backup corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Incremental chain is broken, " - "merge is impossible to finish" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.merge_backup("node", backup_id_merge, + expect_error="because of backup corruption") + self.assertMessage(contains="ERROR: Incremental chain is broken, " + "merge is impossible to finish") def test_merge_different_compression_algo(self): """ Check that backups with different compression algorithms can be merged """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node( - backup_dir, 'node', node, 
options=['--compress-algorithm=zlib']) + self.pb.backup_node('node', node, options=['--compress-algorithm=zlib']) node.safe_psql( "postgres", @@ -1272,8 +1070,7 @@ def test_merge_different_compression_algo(self): " from generate_series(0,1000) i") # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=['--compress-algorithm=pglz']) node.safe_psql( @@ -1285,85 +1082,266 @@ def test_merge_different_compression_algo(self): "vacuum t_heap") # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - backup_id = self.show_pb(backup_dir, "node")[2]["id"] + backup_id = self.pb.show("node")[2]["id"] - self.merge_backup(backup_dir, "node", backup_id) + self.pb.merge_backup("node", backup_id) def test_merge_different_wal_modes(self): """ Check that backups with different wal modes can be merged correctly """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL stream backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # DELTA archive backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) self.assertEqual( - 'ARCHIVE', self.show_pb(backup_dir, 'node', backup_id)['wal']) + 'ARCHIVE', self.pb.show('node', backup_id)['wal']) # DELTA stream backup - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) + + self.assertEqual( + 'STREAM', self.pb.show('node', backup_id)['wal']) + + def test_merge_A_B_C_removes_internal_B(self): + """ + check that A->B->C merge removes B merge stub dir. 
+ """ + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True, + initdb_params=['--data-checksums']) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # FULL + full_id = self.pb.backup_node('node', node, options=['--stream']) + + # DELTA 1 + delta_1_id = self.pb.backup_node('node', node, backup_type='delta') + + # DELTA 2 + delta_2_id = self.pb.backup_node('node', node, backup_type='delta') + + self.pb.merge_backup('node', backup_id=delta_1_id) self.assertEqual( - 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) + 'ARCHIVE', self.pb.show('node', delta_1_id)['wal']) + self.pb.merge_backup('node', backup_id=delta_2_id) + + backups_dirs_list = self.get_backups_dirs(backup_dir, "node") + + self.assertIn(full_id, backups_dirs_list) + self.assertNotIn(delta_1_id, backups_dirs_list) + self.assertIn(delta_2_id, backups_dirs_list) + + @needs_gdb + def test_merge_A_B_C_broken_on_B_removal(self): + """ + check that A->B->C merge removes B merge stub dir + on second merge try after first merge is killed on B removal. + """ + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True, + initdb_params=['--data-checksums']) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # FULL + full_id = self.pb.backup_node('node', node, options=['--stream']) + + # DELTA 1 + delta_1_id = self.pb.backup_node('node', node, backup_type='delta') + + # DELTA 2 + delta_2_id = self.pb.backup_node('node', node, backup_type='delta') + + self.pb.merge_backup('node', backup_id=delta_1_id) + + self.assertEqual('ARCHIVE', self.pb.show('node', delta_1_id)['wal']) + + gdb = self.pb.merge_backup('node', backup_id=delta_2_id, gdb=True) + + gdb.set_breakpoint('renameBackupToDir') + gdb.run_until_break() + + gdb.set_breakpoint("pgBackupInit") + # breaks after removing interim B dir, before recreating "C" dir as merged_to + gdb.continue_execution_until_break() + # killing merge on in-critical-section broken inside BACKUP_STATUS_MERGES critical section + gdb.kill() + + self.expire_locks(backup_dir, 'node') + + # rerun merge to C, it should merge fine + self.pb.merge_backup('node', backup_id=delta_2_id)#, gdb=("suspend", 30303)) + backups_dirs_list = self.get_backups_dirs(backup_dir, "node") + + self.assertIn(full_id, backups_dirs_list) + self.assertNotIn(delta_1_id, backups_dirs_list) + self.assertIn(delta_2_id, backups_dirs_list) + + def test_merge_A_B_and_remove_A_removes_B(self): + """ + Check, that after A->B merge and remove of B removes both A and B dirs. 
+ """ + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True, + initdb_params=['--data-checksums']) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # PRE FULL + pre_full_id = self.pb.backup_node('node', node, options=['--stream']) + + # FULL + full_id = self.pb.backup_node('node', node, options=['--stream']) + + # DELTA 1 + delta_1_id = self.pb.backup_node('node', node, backup_type='delta') + + # DELTA 2 + delta_2_id = self.pb.backup_node('node', node, backup_type='delta') + + # POST FULL + post_full_id = self.pb.backup_node('node', node, options=['--stream']) + + self.pb.merge_backup('node', backup_id=delta_1_id) + + self.assertEqual( + 'ARCHIVE', self.pb.show('node', delta_1_id)['wal']) + + self.pb.delete('node', backup_id=delta_1_id) + + backups_dirs_list = self.get_backups_dirs(backup_dir, "node") + + # these should be deleted obviously + self.assertNotIn(full_id, backups_dirs_list) + self.assertNotIn(delta_2_id, backups_dirs_list) + # these should not be deleted obviously + self.assertIn(pre_full_id, backups_dirs_list) + self.assertIn(post_full_id, backups_dirs_list) + # and actual check for PBCKP-710: deleted symlink directory + self.assertNotIn(delta_1_id, backups_dirs_list) + + + def test_validate_deleted_dirs_merged_from(self): + """ + checks validate fails if we miss full dir + """ + + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True, + initdb_params=['--data-checksums']) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # FULL 1 + full_1 = self.pb.backup_node('node', node, options=['--stream']) + + # DELTA 1-1 + delta_1_1 = self.pb.backup_node('node', node, backup_type='delta') + + #FULL 2 + full_2 = self.pb.backup_node('node', node, options=['--stream']) + + # DELTA 2-1 + delta_2_1 = self.pb.backup_node('node', node, backup_type='delta') + + # validate is ok + self.pb.merge_backup('node', backup_id=delta_2_1) + self.pb.validate('node') + + # changing DELTA_2_1 backup.control symlink fields + # validate should find problems + with self.modify_backup_control(backup_dir, "node", delta_2_1) as cf: + cf.data += "\nsymlink = " + base36enc(base36dec(delta_2_1)+1) + self.pb.validate('node', expect_error=True) + self.assertMessage(contains="no linked backup") + + with self.modify_backup_control(backup_dir, "node", delta_2_1) as cf: + cf.data = "\n".join(cf.data.splitlines()[:-1]) + + # validate should find previous backup is not FULL + self.remove_one_backup(backup_dir, 'node', full_2) + self.pb.validate('node', expect_error=True) + self.assertMessage(contains="no linked backup") + + # validate should find there's previous FULL backup has bad id or cross references + self.remove_one_backup(backup_dir, 'node', delta_1_1) + with self.modify_backup_control(backup_dir, "node", delta_2_1) as cf: + cf.data += f"\nsymlink = {full_1}" + self.pb.validate('node', expect_error=True) + self.assertMessage(contains="has different 'backup-id'") + + # validate should find there's previous FULL backup has bad id or cross references + self.remove_one_backup(backup_dir, 'node', full_1) + self.pb.validate('node', expect_error=True) + self.assertMessage(contains="no linked backup") + + @needs_gdb def test_crash_after_opening_backup_control_1(self): """ check that crashing after opening backup.control for writing will not result in losing backup metadata """ - 
self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL stream backup - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # DELTA archive backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb = self.pb.merge_backup("node", backup_id, gdb=True) gdb.set_breakpoint('write_backup_filelist') gdb.run_until_break() @@ -1372,43 +1350,41 @@ def test_crash_after_opening_backup_control_1(self): gdb.set_breakpoint('pgBackupWriteControl') gdb.continue_execution_until_break() - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGING', self.pb.show('node')[0]['status']) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + 'MERGING', self.pb.show('node')[1]['status']) # @unittest.skip("skip") + @needs_gdb def test_crash_after_opening_backup_control_2(self): """ check that crashing after opening backup_content.control for writing will not result in losing metadata about backup files TODO: rewrite """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Add data node.pgbench_init(scale=3) # FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) # Change data pgbench = node.pgbench(options=['-T', '20', '-c', '2']) @@ -1425,80 +1401,71 @@ def test_crash_after_opening_backup_control_2(self): 'vacuum pgbench_accounts') # DELTA backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb = self.pb.merge_backup("node", 
backup_id, gdb=True) gdb.set_breakpoint('write_backup_filelist') gdb.run_until_break() # gdb.set_breakpoint('sprintf') # gdb.continue_execution_until_break(1) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGING', self.pb.show('node')[0]['status']) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + 'MERGING', self.pb.show('node')[1]['status']) # In to_backup drop file that comes from from_backup # emulate crash during previous merge - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', full_id, 'database', fsm_path) - - # print(file_to_remove) - - os.remove(file_to_remove) + self.remove_backup_file(backup_dir, 'node', full_id, + f'database/{fsm_path}') # Continue failed merge - self.merge_backup(backup_dir, "node", backup_id) + self.pb.merge_backup("node", backup_id) node.cleanup() # restore merge backup - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_losing_file_after_failed_merge(self): """ check that crashing after opening backup_content.control for writing will not result in losing metadata about backup files TODO: rewrite """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Add data node.pgbench_init(scale=1) # FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) # Change data node.safe_psql( @@ -1516,67 +1483,61 @@ def test_losing_file_after_failed_merge(self): vm_path = path + '_vm' # DELTA backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) - gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) + gdb = self.pb.merge_backup("node", backup_id, gdb=True) gdb.set_breakpoint('write_backup_filelist') gdb.run_until_break() # gdb.set_breakpoint('sprintf') # gdb.continue_execution_until_break(20) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGING', self.pb.show('node')[0]['status']) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + 'MERGING', 
self.pb.show('node')[1]['status']) # In to_backup drop file that comes from from_backup # emulate crash during previous merge - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', full_id, 'database', vm_path) - - os.remove(file_to_remove) + self.remove_backup_file(backup_dir, 'node', full_id, + f'database/{vm_path}') # Try to continue failed MERGE - self.merge_backup(backup_dir, "node", backup_id) + self.pb.merge_backup("node", backup_id) self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) + @needs_gdb def test_failed_merge_after_delete(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # add database @@ -1589,8 +1550,7 @@ def test_failed_merge_after_delete(self): "select oid from pg_database where datname = 'testdb'").decode('utf-8').rstrip() # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) # drop database node.safe_psql( @@ -1598,75 +1558,56 @@ def test_failed_merge_after_delete(self): 'DROP DATABASE testdb') # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') - page_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_2 = self.pb.backup_node('node', node, backup_type='page') - gdb = self.merge_backup( - backup_dir, 'node', page_id, + gdb = self.pb.merge_backup('node', page_id, gdb=True, options=['--log-level-console=verbose']) gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() - gdb.set_breakpoint('pgFileDelete') - gdb.continue_execution_until_break(20) + gdb.set_breakpoint('lock_backup') + gdb.continue_execution_until_break() - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') # backup half-merged self.assertEqual( - 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGED', self.pb.show('node')[0]['status']) self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) - - db_path = os.path.join( - backup_dir, 'backups', 'node', - full_id, 'database', 'base', dboid) + full_id, self.pb.show('node')[0]['id']) - try: - self.merge_backup( - backup_dir, 'node', page_id_2, - options=['--log-level-console=verbose']) - self.assertEqual( - 1, 0, - "Expecting Error because of missing parent.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Full backup {0} has unfinished merge with backup {1}".format( - full_id, page_id) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + 
self.pb.merge_backup('node', page_id_2, + options=['--log-level-console=verbose'], + expect_error="because of missing parent") + self.assertMessage(contains=f"ERROR: Full backup {full_id} has " + f"unfinished merge with backup {page_id}") + @needs_gdb def test_failed_merge_after_delete_1(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=1) - page_1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_1 = self.pb.backup_node('node', node, backup_type='page') # Change PAGE1 backup status to ERROR self.change_backup_status(backup_dir, 'node', page_1, 'ERROR') @@ -1678,14 +1619,12 @@ def test_failed_merge_after_delete_1(self): pgbench.wait() # take PAGE2 backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') # Change PAGE1 backup status to OK self.change_backup_status(backup_dir, 'node', page_1, 'OK') - gdb = self.merge_backup( - backup_dir, 'node', page_id, + gdb = self.pb.merge_backup('node', page_id, gdb=True, options=['--log-level-console=verbose']) gdb.set_breakpoint('delete_backup_files') @@ -1694,105 +1633,80 @@ def test_failed_merge_after_delete_1(self): # gdb.set_breakpoint('parray_bsearch') # gdb.continue_execution_until_break() - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('lock_backup') gdb.continue_execution_until_break(30) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) + full_id, self.pb.show('node')[0]['id']) # restore node.cleanup() - try: - #self.restore_node(backup_dir, 'node', node, backup_id=page_1) - self.restore_node(backup_dir, 'node', node) - self.assertEqual( - 1, 0, - "Expecting Error because of orphan status.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} is orphan".format(page_1), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + expect_error="because of orphan status") + self.assertMessage(contains=f"ERROR: Backup {page_1} is orphan") + @needs_gdb def test_failed_merge_after_delete_2(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) 
+ self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=1) - page_1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_1 = self.pb.backup_node('node', node, backup_type='page') # add data pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum']) pgbench.wait() # take PAGE2 backup - page_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_2 = self.pb.backup_node('node', node, backup_type='page') - gdb = self.merge_backup( - backup_dir, 'node', page_2, gdb=True, + gdb = self.pb.merge_backup('node', page_2, gdb=True, options=['--log-level-console=VERBOSE']) - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() + gdb.set_breakpoint('lock_backup') gdb.continue_execution_until_break(2) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') - self.delete_pb(backup_dir, 'node', backup_id=page_2) + self.expire_locks(backup_dir, 'node') + + self.pb.delete('node', backup_id=page_2) # rerun merge - try: - #self.restore_node(backup_dir, 'node', node, backup_id=page_1) - self.merge_backup(backup_dir, 'node', page_1) - self.assertEqual( - 1, 0, - "Expecting Error because of backup is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Full backup {0} has unfinished merge " - "with backup {1}".format(full_id, page_2), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.merge_backup('node', page_1, + expect_error="because backup is missing") + self.assertMessage(contains=f"ERROR: Full backup {full_id} has unfinished merge " + f"with backup {page_2}") + @needs_gdb def test_failed_merge_after_delete_3(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # add database @@ -1805,8 +1719,7 @@ def test_failed_merge_after_delete_3(self): "select oid from pg_database where datname = 'testdb'").rstrip() # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) # drop database node.safe_psql( @@ -1814,57 +1727,42 @@ def test_failed_merge_after_delete_3(self): 'DROP DATABASE testdb') # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') # create database node.safe_psql( 'postgres', 'create DATABASE testdb') - page_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_2 = self.pb.backup_node('node', node, backup_type='page') - gdb = self.merge_backup( - backup_dir, 'node', page_id, + gdb = self.pb.merge_backup('node', 
page_id, gdb=True, options=['--log-level-console=verbose']) gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() - gdb.set_breakpoint('pgFileDelete') - gdb.continue_execution_until_break(20) + gdb.set_breakpoint('lock_backup') + gdb.continue_execution_until_break() + + gdb.signal('SIGKILL') - gdb._execute('signal SIGKILL') + self.expire_locks(backup_dir, 'node') # backup half-merged self.assertEqual( - 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGED', self.pb.show('node')[0]['status']) self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) - - db_path = os.path.join( - backup_dir, 'backups', 'node', full_id) + full_id, self.pb.show('node')[0]['id']) # FULL backup is missing now - shutil.rmtree(db_path) + self.remove_one_backup(backup_dir, 'node', full_id) - try: - self.merge_backup( - backup_dir, 'node', page_id_2, - options=['--log-level-console=verbose']) - self.assertEqual( - 1, 0, - "Expecting Error because of missing parent.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Failed to find parent full backup for {0}".format( - page_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.merge_backup('node', page_id_2, + options=['--log-level-console=verbose'], + expect_error="because of missing parent") + self.assertMessage(contains=f"ERROR: Failed to find parent full backup for {page_id_2}") # Skipped, because backups from the future are invalid. # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" @@ -1876,35 +1774,32 @@ def test_merge_backup_from_future(self): take FULL backup, table PAGE backup from future, try to merge page with FULL """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest("test uses rename which is hard for cloud") + + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=5) # Take PAGE from future - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') - with open( - os.path.join( - backup_dir, 'backups', 'node', - backup_id, "backup.control"), "a") as conf: - conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() + timedelta(days=3))) + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data += "\nstart-time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() + timedelta(days=3)) # rename directory - new_id = self.show_pb(backup_dir, 'node')[1]['id'] + new_id = self.pb.show('node')[1]['id'] os.rename( os.path.join(backup_dir, 'backups', 'node', backup_id), @@ -1913,17 +1808,15 @@ def test_merge_backup_from_future(self): pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum']) pgbench.wait() - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = 
self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) result = node.table_checksum("pgbench_accounts") - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored, backup_id=backup_id) pgdata_restored = self.pgdata_content(node_restored.data_dir) @@ -1931,15 +1824,12 @@ def test_merge_backup_from_future(self): # check that merged backup has the same state as node_restored.cleanup() - self.merge_backup(backup_dir, 'node', backup_id=backup_id) - self.restore_node( - backup_dir, 'node', + self.pb.merge_backup('node', backup_id=backup_id) + self.pb.restore_node('node', node_restored, backup_id=backup_id) pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.set_auto_conf( - node_restored, - {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() result_new = node_restored.table_checksum("pgbench_accounts") @@ -1950,7 +1840,7 @@ def test_merge_backup_from_future(self): # @unittest.skip("skip") def test_merge_multiple_descendants(self): - """ + r""" PAGEb3 | PAGEa3 PAGEb2 / @@ -1960,26 +1850,23 @@ def test_merge_multiple_descendants(self): FULLb | FULLa """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.pb.backup_node('node', node) # Change FULLb backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # Change FULLb backup status to OK self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') @@ -1991,8 +1878,7 @@ def test_merge_multiple_descendants(self): # FULLb OK # FULLa OK - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -2011,8 +1897,7 @@ def test_merge_multiple_descendants(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # PAGEa2 OK # PAGEb1 ERROR @@ -2034,8 +1919,7 @@ def test_merge_multiple_descendants(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # PAGEb2 OK # PAGEa2 ERROR @@ -2059,8 +1943,7 @@ def test_merge_multiple_descendants(self): # FULLb ERROR # FULLa OK - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a3 = self.pb.backup_node('node', node, 
backup_type='page') # PAGEa3 OK # PAGEb2 ERROR @@ -2078,8 +1961,7 @@ def test_merge_multiple_descendants(self): self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK') - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b3 = self.pb.backup_node('node', node, backup_type='page') # PAGEb3 OK # PAGEa3 ERROR @@ -2106,32 +1988,20 @@ def test_merge_multiple_descendants(self): # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a3)['parent-backup-id'], page_id_a1) self.assertEqual( - self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a2)['parent-backup-id'], page_id_a1) - self.merge_backup( - backup_dir, 'node', page_id_a2, + self.pb.merge_backup('node', page_id_a2, options=['--merge-expired', '--log-level-console=log']) - try: - self.merge_backup( - backup_dir, 'node', page_id_a3, - options=['--merge-expired', '--log-level-console=log']) - self.assertEqual( - 1, 0, - "Expecting Error because of parent FULL backup is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "ERROR: Failed to find parent full backup for {0}".format( - page_id_a3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.merge_backup('node', page_id_a3, + options=['--merge-expired', '--log-level-console=log'], + expect_error="parent FULL backup is missing") + self.assertMessage(contains=f"ERROR: Failed to find parent full backup for {page_id_a3}") # @unittest.skip("skip") def test_smart_merge(self): @@ -2142,15 +2012,13 @@ def test_smart_merge(self): copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # create database @@ -2159,7 +2027,7 @@ def test_smart_merge(self): "CREATE DATABASE testdb") # take FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # drop database node.safe_psql( @@ -2167,42 +2035,39 @@ def test_smart_merge(self): "DROP DATABASE testdb") # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') # get delta between FULL and PAGE filelists - filelist_full = self.get_backup_filelist( - backup_dir, 'node', full_id) + filelist_full = self.get_backup_filelist(backup_dir, 'node', full_id) - filelist_page = self.get_backup_filelist( - backup_dir, 'node', page_id) + # merge PAGE backup + self.pb.merge_backup('node', page_id, + options=['--log-level-file=VERBOSE']) + + filelist_full_after_merge = self.get_backup_filelist(backup_dir, 'node', full_id) filelist_diff = 
self.get_backup_filelist_diff( - filelist_full, filelist_page) + filelist_full, filelist_full_after_merge) - # merge PAGE backup - self.merge_backup( - backup_dir, 'node', page_id, - options=['--log-level-file=VERBOSE']) + logfile_content = self.read_pb_log() - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() + self.assertTrue(filelist_diff, "There should be deleted files") + for file in filelist_diff: + self.assertIn(file, logfile_content) + + @needs_gdb def test_idempotent_merge(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # add database @@ -2211,8 +2076,7 @@ def test_idempotent_merge(self): 'CREATE DATABASE testdb') # take FULL backup - full_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) # create database node.safe_psql( @@ -2220,47 +2084,50 @@ def test_idempotent_merge(self): 'create DATABASE testdb1') # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='delta') # create database node.safe_psql( 'postgres', 'create DATABASE testdb2') - page_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_2 = self.pb.backup_node('node', node, backup_type='delta') - gdb = self.merge_backup( - backup_dir, 'node', page_id_2, - gdb=True, options=['--log-level-console=verbose']) + gdb = self.pb.merge_backup('node', page_id_2, + gdb=True, options=['--log-level-console=log']) gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() gdb.remove_all_breakpoints() - gdb.set_breakpoint('rename') + gdb.set_breakpoint("renameBackupToDir") gdb.continue_execution_until_break() - gdb.continue_execution_until_break(2) + gdb.set_breakpoint("write_backup") + gdb.continue_execution_until_break() + gdb.set_breakpoint("pgBackupFree") + gdb.continue_execution_until_break() + - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') - show_backups = self.show_pb(backup_dir, "node") + self.expire_locks(backup_dir, 'node') + + show_backups = self.pb.show("node") self.assertEqual(len(show_backups), 1) self.assertEqual( - 'MERGED', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGED', self.pb.show('node')[0]['status']) self.assertEqual( - full_id, self.show_pb(backup_dir, 'node')[0]['id']) + full_id, self.pb.show('node')[0]['id']) - self.merge_backup(backup_dir, 'node', page_id_2) + self.pb.merge_backup('node', page_id_2) self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) self.assertEqual( - page_id_2, self.show_pb(backup_dir, 'node')[0]['id']) + page_id_2, self.pb.show('node')[0]['id']) def test_merge_correct_inheritance(self): """ @@ -2268,15 +2135,13 @@ def test_merge_correct_inheritance(self): 'note' and 'expire-time' are correctly inherited during merge """ - backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # add database @@ -2285,7 +2150,7 @@ def test_merge_correct_inheritance(self): 'CREATE DATABASE testdb') # take FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # create database node.safe_psql( @@ -2293,25 +2158,23 @@ def test_merge_correct_inheritance(self): 'create DATABASE testdb1') # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') - self.set_backup( - backup_dir, 'node', page_id, options=['--note=hello', '--ttl=20d']) + self.pb.set_backup('node', page_id, options=['--note=hello', '--ttl=20d']) - page_meta = self.show_pb(backup_dir, 'node', page_id) + page_meta = self.pb.show('node', page_id) - self.merge_backup(backup_dir, 'node', page_id) + self.pb.merge_backup('node', page_id) - print(self.show_pb(backup_dir, 'node', page_id)) + print(self.pb.show('node', page_id)) self.assertEqual( page_meta['note'], - self.show_pb(backup_dir, 'node', page_id)['note']) + self.pb.show('node', page_id)['note']) self.assertEqual( page_meta['expire-time'], - self.show_pb(backup_dir, 'node', page_id)['expire-time']) + self.pb.show('node', page_id)['expire-time']) def test_merge_correct_inheritance_1(self): """ @@ -2319,15 +2182,13 @@ def test_merge_correct_inheritance_1(self): 'note' and 'expire-time' are correctly inherited during merge """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # add database @@ -2336,8 +2197,7 @@ def test_merge_correct_inheritance_1(self): 'CREATE DATABASE testdb') # take FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['--stream', '--note=hello', '--ttl=20d']) # create database @@ -2346,18 +2206,17 @@ def test_merge_correct_inheritance_1(self): 'create DATABASE testdb1') # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') - self.merge_backup(backup_dir, 'node', page_id) + self.pb.merge_backup('node', page_id) self.assertNotIn( 'note', - self.show_pb(backup_dir, 'node', page_id)) + self.pb.show('node', page_id)) self.assertNotIn( 'expire-time', - self.show_pb(backup_dir, 'node', page_id)) + self.pb.show('node', page_id)) # @unittest.skip("skip") # @unittest.expectedFailure @@ -2373,15 +2232,12 @@ def test_multi_timeline_merge(self): P must have F as parent """ - 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql("postgres", "create extension pageinspect") @@ -2396,16 +2252,15 @@ def test_multi_timeline_merge(self): "create extension amcheck_next") node.pgbench_init(scale=20) - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, + self.pb.restore_node('node', node, backup_id=full_id, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) @@ -2418,8 +2273,7 @@ def test_multi_timeline_merge(self): # create timelines for i in range(2, 7): node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target=latest', '--recovery-target-action=promote', @@ -2432,23 +2286,22 @@ def test_multi_timeline_merge(self): # create backup at 2, 4 and 6 timeline if i % 2 == 0: - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - page_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) - self.merge_backup(backup_dir, 'node', page_id) + self.pb.merge_backup('node', page_id) result = node.table_checksum("pgbench_accounts") - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() result_new = node_restored.table_checksum("pgbench_accounts") @@ -2457,308 +2310,281 @@ def test_multi_timeline_merge(self): self.compare_pgdata(pgdata, pgdata_restored) - self.checkdb_node( - backup_dir, - 'node', + self.pb.checkdb_node( + use_backup_dir=True, + instance='node', options=[ '--amcheck', '-d', 'postgres', '-p', str(node.port)]) - self.checkdb_node( - backup_dir, - 'node', + self.pb.checkdb_node( + use_backup_dir=True, + instance='node', options=[ '--amcheck', '-d', 'postgres', '-p', str(node_restored.port)]) # @unittest.skip("skip") # @unittest.expectedFailure + @needs_gdb def test_merge_page_header_map_retry(self): """ page header map cannot be trusted when running retry """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - 
initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=20) - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - delta_id = self.backup_node( - backup_dir, 'node', node, + delta_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - gdb = self.merge_backup(backup_dir, 'node', delta_id, gdb=True) + gdb = self.pb.merge_backup('node', delta_id, gdb=True) # our goal here is to get full backup with merged data files, # but with old page header map gdb.set_breakpoint('cleanup_header_map') gdb.run_until_break() - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') + + self.expire_locks(backup_dir, 'node') - self.merge_backup(backup_dir, 'node', delta_id) + self.pb.merge_backup('node', delta_id) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_missing_data_file(self): """ """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Add data node.pgbench_init(scale=1) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Change data pgbench = node.pgbench(options=['-T', '5', '-c', '1']) pgbench.wait() # DELTA backup - delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + delta_id = self.pb.backup_node('node', node, backup_type='delta') path = node.safe_psql( 'postgres', "select pg_relation_filepath('pgbench_accounts')").decode('utf-8').rstrip() - gdb = self.merge_backup( - backup_dir, "node", delta_id, + gdb = self.pb.merge_backup("node", delta_id, options=['--log-level-file=VERBOSE'], gdb=True) gdb.set_breakpoint('merge_files') gdb.run_until_break() # remove data file in incremental backup - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', delta_id, 'database', path) - - os.remove(file_to_remove) + self.remove_backup_file(backup_dir, 'node', delta_id, + f'database/{path}') gdb.continue_execution_until_error() - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() + logfile_content = self.read_pb_log() - self.assertIn( - 'ERROR: Cannot open backup file "{0}": No such file or directory'.format(file_to_remove), - logfile_content) + if fs_backup_class.is_file_based: + self.assertRegex( + logfile_content, + 'ERROR: Open backup file: Cannot open file "[^"]*{0}": No such file or directory'.format(path)) + else: # suggesting S3 for minio, 
S3TestBackupDir + regex = 'ERROR: Open backup file: S3 error [0-9a-fA-F]+:[^:]+:/[^\\n].*{0}:NoSuchKey:404: No such file'.format( + path) + self.assertRegex( + logfile_content, + regex) # @unittest.skip("skip") + @needs_gdb def test_missing_non_data_file(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # DELTA backup - delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + delta_id = self.pb.backup_node('node', node, backup_type='delta') - gdb = self.merge_backup( - backup_dir, "node", delta_id, + gdb = self.pb.merge_backup("node", delta_id, options=['--log-level-file=VERBOSE'], gdb=True) gdb.set_breakpoint('merge_files') gdb.run_until_break() # remove data file in incremental backup - file_to_remove = os.path.join( - backup_dir, 'backups', - 'node', delta_id, 'database', 'backup_label') - - os.remove(file_to_remove) + self.remove_backup_file(backup_dir, 'node', delta_id, + 'database/backup_label') gdb.continue_execution_until_error() - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() + logfile_content = self.read_pb_log() - self.assertIn( - 'ERROR: File "{0}" is not found'.format(file_to_remove), - logfile_content) + self.assertRegex( + logfile_content, + 'ERROR: File "[^"]*backup_label" is not found') self.assertIn( 'ERROR: Backup files merging failed', logfile_content) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[0]['status']) + 'MERGING', self.pb.show('node')[0]['status']) self.assertEqual( - 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) + 'MERGING', self.pb.show('node')[1]['status']) # @unittest.skip("skip") + @needs_gdb def test_merge_remote_mode(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # DELTA backup - delta_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + delta_id = self.pb.backup_node('node', node, backup_type='delta') - self.set_config(backup_dir, 'node', options=['--retention-window=1']) + self.pb.set_config('node', options=['--retention-window=1']) - backups = os.path.join(backup_dir, 'backups', 'node') - with open( - os.path.join( - backups, full_id, "backup.control"), "a") as conf: - 
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=5))) + with self.modify_backup_control(backup_dir, 'node', full_id) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=5)) - gdb = self.backup_node( - backup_dir, "node", node, + gdb = self.pb.backup_node("node", node, options=['--log-level-file=VERBOSE', '--merge-expired'], gdb=True) gdb.set_breakpoint('merge_files') gdb.run_until_break() - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - - with open(logfile, "w+") as f: - f.truncate() + logfile_content_pre_len = len(self.read_pb_log()) gdb.continue_execution_until_exit() - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() + logfile_content = self.read_pb_log()[logfile_content_pre_len:] self.assertNotIn( 'SSH', logfile_content) self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node')[0]['status']) + 'OK', self.pb.show('node')[0]['status']) def test_merge_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - initdb_params=['--data-checksums']) + node1 = self.pg_node.make_simple('node1') node1.cleanup() node.pgbench_init(scale=5) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, options=['-T', '10', '-c', '1']) - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') node.safe_psql( 'postgres', 'reindex index pg_type_oid_index') - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_id = self.pb.backup_node('node', node, backup_type='delta') - self.merge_backup(backup_dir, 'node', backup_id) + self.pb.merge_backup('node', backup_id) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() node.safe_psql( 'postgres', 'select 1') + @needs_gdb def test_unfinished_merge(self): """ Test when parent has unfinished merge with a different backup. 
""" - self._check_gdb_flag_or_skip_test() - cases = [('fail_merged', 'write_backup_filelist', ['MERGED', 'MERGING', 'OK']), - ('fail_merging', 'pgBackupWriteControl', ['MERGING', 'OK', 'OK'])] + cases = [('fail_merged', 'delete_backup_files', ['MERGED', 'MERGING', 'OK']), + ('fail_merging', 'create_directories_in_full', ['MERGING', 'MERGING', 'OK'])] for name, terminate_at, states in cases: node_name = 'node_' + name - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, name) - node = self.make_simple_node( + backup_dir = self.backup_dir + self.backup_dir.cleanup() + node = self.pg_node.make_simple( base_dir=os.path.join(self.module_name, self.fname, node_name), - set_replication=True, - initdb_params=['--data-checksums']) + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, node_name, node) - self.set_archiving(backup_dir, node_name, node) + self.pb.init() + self.pb.add_instance(node_name, node) + self.pb.set_archiving(node_name, node) node.slow_start() - full_id=self.backup_node(backup_dir, node_name, node, options=['--stream']) + full_id=self.pb.backup_node(node_name, node, options=['--stream']) - backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') - second_backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + backup_id = self.pb.backup_node(node_name, node, backup_type='delta') + second_backup_id = self.pb.backup_node(node_name, node, backup_type='delta') - gdb = self.merge_backup(backup_dir, node_name, backup_id, gdb=True) + gdb = self.pb.merge_backup(node_name, backup_id, gdb=True) gdb.set_breakpoint(terminate_at) gdb.run_until_break() @@ -2766,14 +2592,898 @@ def test_unfinished_merge(self): gdb._execute('signal SIGINT') gdb.continue_execution_until_error() - print(self.show_pb(backup_dir, node_name, as_json=False, as_text=True)) + print(self.pb.show(node_name, as_json=False, as_text=True)) - for expected, real in zip(states, self.show_pb(backup_dir, node_name), strict=True): + backup_infos = self.pb.show(node_name) + self.assertEqual(len(backup_infos), len(states)) + for expected, real in zip(states, backup_infos): self.assertEqual(expected, real['status']) - with self.assertRaisesRegex(ProbackupException, + with self.assertRaisesRegex(Exception, f"Full backup {full_id} has unfinished merge with backup {backup_id}"): - self.merge_backup(backup_dir, node_name, second_backup_id, gdb=False) + self.pb.merge_backup(node_name, second_backup_id, gdb=False) + + @needs_gdb + def test_continue_failed_merge_with_corrupted_full_backup(self): + """ + Fail merge via gdb with corrupted FULL backup + """ + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.safe_psql( + "postgres", + "create table t_heap as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,1000) i") + + old_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + # FULL backup + self.pb.backup_node('node', node) + + node.safe_psql( + "postgres", + "update t_heap set id = 100500") + + node.safe_psql( + "postgres", + "vacuum full t_heap") + + new_path = node.safe_psql( + "postgres", + "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() + + # DELTA BACKUP + backup_id_1 = self.pb.backup_node('node', node, backup_type='delta') + + full_id = 
self.pb.show("node")[0]["id"] + + # CORRUPT full backup + # read block from future + # block_size + backup_header = 8200 + file_content2 = self.read_backup_file(backup_dir, 'node', backup_id_1, + f'database/{new_path}')[:16400] + # write block from future + self.corrupt_backup_file(backup_dir, 'node', full_id, + f'database/{old_path}', + damage=(8200, file_content2[8200:16400])) + + # Try to continue failed MERGE + self.pb.merge_backup("node", backup_id_1, + expect_error=f"WARNING: Backup {full_id} data files are corrupted") + self.assertMessage(contains=f"ERROR: Backup {full_id} has status CORRUPT, merge is aborted") + + # Check number of backups + show_res = self.pb.show("node") + self.assertEqual(len(show_res), 2) + + @unittest.skipIf(not (have_alg('lz4') and have_alg('zstd')), + "pg_probackup is not compiled with lz4 or zstd support") + def test_merge_compressed_and_uncompressed(self): + """ + 1. Full compressed [zlib, 3] backup -> change data + 2. Delta uncompressed -> change data + 3. Page compressed [lz4, 2] -> change data + 5. Merge all backups in one + Restore and compare + """ + backup_dir = self.backup_dir + + # Initialize instance and backup directory + node = self.pg_node.make_simple('node', set_replication=True) + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do compressed FULL backup + self.pb.backup_node("node", node, options=['--stream', + '--compress-level', '3', + '--compress-algorithm', 'zlib']) + show_backup = self.pb.show("node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed DELTA backup + self.pb.backup_node("node", node, + backup_type="delta") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do compressed PAGE backup + self.pb.backup_node("node", node, backup_type="page", options=['--compress-level', '2', + '--compress-algorithm', 'lz4']) + + pgdata = self.pgdata_content(node.data_dir) + + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(len(show_backup), 3) + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") + + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") + + page_id = show_backup[2]["id"] + + # Merge all backups + self.pb.merge_backup("node", page_id, options=['-j5']) + show_backups = self.pb.show("node") + + # Check number of backups and status + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.pb.restore_node('node', node=node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + + def test_merge_with_error_backup_in_the_middle(self): + """ + 1. Full uncompressed backup -> change data + 2. Delta with error (stop node) -> change data + 3. Page -> change data + 4. Delta -> change data + 5. 
Merge all backups in one + Restore and compare + """ + backup_dir = self.backup_dir + + # Initialize instance and backup directory + node = self.pg_node.make_simple('node', set_replication=True) + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do uncompressed FULL backup + self.pb.backup_node("node", node) + show_backup = self.pb.show("node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + node.stop() + + # Try to create a DELTA backup with disabled archiving (expecting error) + self.pb.backup_node("node", node, backup_type="delta", expect_error=True) + + # Enable archiving + node.slow_start() + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do PAGE backup + self.pb.backup_node("node", node, backup_type="page") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do DELTA backup + self.pb.backup_node("node", node, backup_type="delta") + + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(len(show_backup), 4) + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") + + self.assertEqual(show_backup[1]["status"], "ERROR") + self.assertEqual(show_backup[1]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") + + self.assertEqual(show_backup[3]["status"], "OK") + self.assertEqual(show_backup[3]["backup-mode"], "DELTA") + + delta_id = show_backup[3]["id"] + + # Merge all backups + self.pb.merge_backup("node", delta_id, options=['-j5']) + show_backup = self.pb.show("node") + + # Check number of backups and status + self.assertEqual(len(show_backup), 2) + self.assertEqual(show_backup[0]["status"], "ERROR") + self.assertEqual(show_backup[0]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "FULL") + + def test_merge_with_deleted_backup_in_the_middle(self): + """ + 1. Full uncompressed backup -> change data + 2. Delta uncompressed -> change data + 3. 1 Page uncompressed -> change data + 4. 2 Page uncompressed + 5. Remove 1 Page backup + 5. 
Merge all backups in one + Restore and compare + """ + backup_dir = self.backup_dir + + # Initialize instance and backup directory + node = self.pg_node.make_simple('node', set_replication=True) + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do uncompressed FULL backup + self.pb.backup_node("node", node) + show_backup = self.pb.show("node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed DELTA backup + self.pb.backup_node("node", node, + backup_type="delta") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed 1 PAGE backup + self.pb.backup_node("node", node, backup_type="page") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed 2 PAGE backup + self.pb.backup_node("node", node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(len(show_backup), 4) + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") + + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") + + self.assertEqual(show_backup[3]["status"], "OK") + self.assertEqual(show_backup[3]["backup-mode"], "PAGE") + + first_page_id = show_backup[2]["id"] + second_page_id = show_backup[3]["id"] + + # Remove backup in the middle + self.remove_one_backup(backup_dir, "node", first_page_id) + + # Merge all backups + error = self.pb.merge_backup("node", second_page_id, options=['-j5'], expect_error=True) + self.assertMessage(error, contains=f"WARNING: Backup {first_page_id} is missing\n" + f"ERROR: Failed to find parent full backup for {second_page_id}") + + def test_merge_with_multiple_full_backups(self): + """ + 1. Full backup -> change data + 2. Delta -> change data + 3. Page -> change data + 4. Full -> change data + 5. Page -> change data + 6. Delta + 7. 
Merge all backups in one + Restore and compare + """ + # Initialize instance and backup directory + node = self.pg_node.make_simple('node', set_replication=True) + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=10) + + # Do uncompressed FULL backup + self.pb.backup_node("node", node) + show_backup = self.pb.show("node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed DELTA backup + self.pb.backup_node("node", node, + backup_type="delta") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed PAGE backup + self.pb.backup_node("node", node, backup_type="page") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed FULL backup + self.pb.backup_node("node", node) + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed PAGE backup + self.pb.backup_node("node", node, backup_type="page") + + # Change data + pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed DELTA backup + self.pb.backup_node("node", node, + backup_type="delta") + + pgdata = self.pgdata_content(node.data_dir) + + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(len(show_backup), 6) + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") + + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") + + self.assertEqual(show_backup[3]["status"], "OK") + self.assertEqual(show_backup[3]["backup-mode"], "FULL") + + self.assertEqual(show_backup[4]["status"], "OK") + self.assertEqual(show_backup[4]["backup-mode"], "PAGE") + + self.assertEqual(show_backup[5]["status"], "OK") + self.assertEqual(show_backup[5]["backup-mode"], "DELTA") + + last_backup_id = show_backup[5]["id"] + + # Merge all backups + self.pb.merge_backup("node", last_backup_id, options=['-j5']) + show_backups = self.pb.show("node") + + # Check number of backups and status + self.assertEqual(len(show_backups), 4) + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") + + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "DELTA") + + self.assertEqual(show_backup[2]["status"], "OK") + self.assertEqual(show_backup[2]["backup-mode"], "PAGE") + + self.assertEqual(show_backup[3]["status"], "OK") + self.assertEqual(show_backup[3]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.pb.restore_node('node', node=node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + + def test_merge_with_logical_corruption(self): + """ + 1. Full backup -> change data + 2. Break logic (remove a referenced row from parent table) + 3. Mark foreign key constraint as NOT VALID + 4. Perform PAGE backup + 5. Merge all backups into one + 6. 
Restore and compare data directories + 7. Validate foreign key constraint to check that the logical corruption is also restored + """ + backup_dir = self.backup_dir + + # Initialize instance and backup directory + node = self.pg_node.make_simple('node', set_replication=True) + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Create a table and fill + node.safe_psql("postgres", """ + CREATE TABLE parent_table ( + id serial PRIMARY KEY, + name varchar(100) NOT NULL + ); + + CREATE TABLE child_table ( + id serial PRIMARY KEY, + parent_id integer REFERENCES parent_table (id), + value varchar(100) NOT NULL + ); + + INSERT INTO parent_table (name) VALUES ('Parent 1'), ('Parent 2'), ('Parent 3'); + + INSERT INTO child_table (parent_id, value) VALUES (1, 'Child 1.1'), (1, 'Child 1.2'), (2, 'Child 2.1'), + (2, 'Child 2.2'), (3, 'Child 3.1'), (3, 'Child 3.2'); + """) + + # Do Full backup + self.pb.backup_node("node", node) + + # Break logic + node.safe_psql("postgres", """ + ALTER TABLE child_table DROP CONSTRAINT child_table_parent_id_fkey; + DELETE FROM parent_table WHERE id = 2; + ALTER TABLE child_table ADD CONSTRAINT child_table_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES parent_table (id) NOT VALID; + """) + + # Do PAGE backup + self.pb.backup_node("node", node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + # Check backups + show_backup = self.pb.show("node") + self.assertEqual(len(show_backup), 2) + self.assertEqual(show_backup[0]["status"], "OK") + self.assertEqual(show_backup[0]["backup-mode"], "FULL") + + self.assertEqual(show_backup[1]["status"], "OK") + self.assertEqual(show_backup[1]["backup-mode"], "PAGE") + + page_id = show_backup[1]["id"] + + # Merge all backups + self.pb.merge_backup("node", page_id, options=['-j5']) + show_backups = self.pb.show("node") + + # Check number of backups and status + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.pb.restore_node('node', node=node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + node.slow_start() + + # Check that logic of restored table also broken + error = node.safe_psql("postgres", """ + ALTER TABLE child_table VALIDATE CONSTRAINT child_table_parent_id_fkey; + """, expect_error=True) + self.assertMessage(error, contains='Key (parent_id)=(2) is not present in table "parent_table"') + + # Clean after yourself + node.cleanup() + + def test_two_merges_1(self): + """ + Test two merges for one full backup. 
+ """ + node = self.pg_node.make_simple('node', + initdb_params=['--data-checksums']) + node.set_auto_conf({'shared_buffers': '2GB', + 'autovacuum': 'off'}) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=100) + + self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + prev_id = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + last_id = self.pb.backup_node('node', node, backup_type="page") + + checksum = node.pgbench_table_checksums() + node.stop() + + self.pb.merge_backup('node', prev_id) + + self.pb.merge_backup('node', last_id) + + restored = self.pg_node.make_empty('restored') + self.pb.restore_node('node', restored) + restored.set_auto_conf({'port': restored.port}) + restored.slow_start() + + restored_checksum = restored.pgbench_table_checksums() + self.assertEqual(checksum, restored_checksum, + "data are not equal") + + def test_two_merges_2(self): + """ + Test two merges for one full backup with data in tablespace. + """ + node = self.pg_node.make_simple('node', initdb_params=['--data-checksums']) + node.set_auto_conf({'shared_buffers': '2GB', + 'autovacuum': 'off'}) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + # Fill with data + node.pgbench_init(scale=100, options=['--tablespace=somedata']) + + self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + prev_id = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20000', '-c', '3', '-n']) + pgbench.wait() + + last_id = self.pb.backup_node('node', node, backup_type="page") + + checksum = node.pgbench_table_checksums() + node.stop() + + self.pb.merge_backup('node', prev_id) + + self.pb.merge_backup('node', last_id) + + restored = self.pg_node.make_empty('restored') + node_ts = os.path.join(node.base_dir, 'somedata') + restored_ts = os.path.join(restored.base_dir, 'somedata') + os.mkdir(restored_ts) + self.pb.restore_node('node', restored, + options=['-T', f"{node_ts}={restored_ts}"]) + restored.set_auto_conf({'port': restored.port}) + restored.slow_start() + + restored_checksum = restored.pgbench_table_checksums() + self.assertEqual(checksum, restored_checksum, + "data are not equal") + + @needs_gdb + def test_backup_while_merge(self): + """ + Test backup is not possible while closest full backup is in merge. + (PBCKP-626_) + TODO: fix it if possible. 
+ """ + + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.pgbench_init(scale=1) + + self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '100', '-c', '3', '-n']) + pgbench.wait() + + first_page = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '100', '-c', '3', '-n']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '100', '-c', '3', '-n']) + pgbench.wait() + + gdb = self.pb.merge_backup('node', first_page, gdb=True) + gdb.set_breakpoint('create_directories_in_full') + gdb.run_until_break() + + self.pb.backup_node('node', node, backup_type="page", + expect_error="just because it goes this way yet.") + + gdb.kill() + +############################################################################ +# dry-run +############################################################################ + + def test_basic_dry_run_merge_full_2page(self): + """ + 1. Full backup -> fill data + 2. First Page backup -> fill data + 3. Second Page backup + 4. Merge 2 "Page" backups with dry-run + Compare instance directory before and after merge + """ + # Initialize instance and backup directory + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Do full backup + self.pb.backup_node("node", node, options=['--compress']) + show_backup = self.pb.show("node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Fill with data + with node.connect() as conn: + conn.execute("create table test (id int);") + conn.execute( + "insert into test select i from generate_series(1,10) s(i);") + conn.commit() + + # Do first page backup + self.pb.backup_node("node", node, backup_type="page", options=['--compress']) + show_backup = self.pb.show("node")[1] + + # sanity check + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Fill with data + with node.connect() as conn: + conn.execute( + "insert into test select i from generate_series(1,10) s(i);") + count1 = conn.execute("select count(*) from test") + conn.commit() + + # take PAGE backup with external directory pointing to a file + external_dir = self.get_tblspace_path(node, 'somedirectory') + os.mkdir(external_dir) + + # Do second page backup + self.pb.backup_node("node", node, + backup_type="page", options=['--compress', '--external-dirs={0}'.format(external_dir)]) + show_backup = self.pb.show("node")[2] + page_id = show_backup["id"] + + # sanity check + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Check data changes absence + instance_before = self.pgdata_content(os.path.join(self.backup_dir, 'backups/node')) + + # Merge all backups + output = self.pb.merge_backup("node", page_id, + options=['--dry-run']) + self.assertNotIn("WARNING", output) + instance_after = self.pgdata_content(os.path.join(self.backup_dir, 'backups/node')) + self.compare_pgdata(instance_before, instance_after) + + show_backups = self.pb.show("node") + node.cleanup() + + @unittest.skipIf(not fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def 
test_basic_dry_run_check_merge_with_access(self): + """ + Access check suite if disk mounted as read_only + """ + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance("node", node) + self.pb.set_archiving("node", node) + node.slow_start() + + # Do full backup + self.pb.backup_node("node", node, options=['--compress']) + show_backup = self.pb.show("node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Fill with data + with node.connect() as conn: + conn.execute("create table test (id int);") + conn.execute( + "insert into test select i from generate_series(1,10) s(i);") + conn.commit() + + # Do first page backup + self.pb.backup_node("node", node, backup_type="page", options=['--compress']) + show_backup = self.pb.show("node")[1] + + # sanity check + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Fill with data + with node.connect() as conn: + conn.execute( + "insert into test select i from generate_series(1,10) s(i);") + count1 = conn.execute("select count(*) from test") + conn.commit() + + # take PAGE backup with external directory pointing to a file + external_dir = self.get_tblspace_path(node, 'somedirectory') + os.mkdir(external_dir) + + # Do second page backup + self.pb.backup_node("node", node, + backup_type="page", options=['--compress', '--external-dirs={0}'.format(external_dir)]) + show_backup = self.pb.show("node")[2] + page_id = show_backup["id"] + + # sanity check + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Access check + dir_path = os.path.join(self.backup_dir, 'backups/node') + dir_mode = os.stat(dir_path).st_mode + os.chmod(dir_path, 0o500) + + error_message = self.pb.merge_backup("node", page_id, + options=['--dry-run'], expect_error ='because of changed permissions') + try: + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + + dir_path = os.path.join(self.backup_dir, 'backups/node/', page_id) + dir_mode = os.stat(dir_path).st_mode + os.chmod(dir_path, 0o500) + + error_message = self.pb.merge_backup("node", page_id, + options=['--dry-run'], expect_error ='because of changed permissions') + try: + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(dir_path, dir_mode) + + + + +class BenchMerge(ProbackupTest): + + def setUp(self): + super().setUp() + + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() -# 1. Need new test with corrupted FULL backup -# 2. 
different compression levels + node.execute(""" + do $$ + declare + i int; + begin + for i in 0..2000 loop + execute 'create table x'||i||'(i int primary key, j text);'; + commit; + end loop; + end; + $$; + """) + + start = int(time.time()) + self.pb.backup_node('node', node, + options=['--start-time', str(start)]) + for i in range(50): + start += 1 + self.pb.backup_node('node', node, backup_type='page', + options=['--start-time', str(start)]) + start += 1 + self.backup_id = self.pb.backup_node('node', node, backup_type='page', + options=['--start-time', str(start)]) + + def test_bench_merge_long_chain(self): + """ + test long incremental chain with a lot of tables + """ + + start = time.time() + self.pb.merge_backup('node', self.backup_id) + stop = time.time() + print(f"LASTS FOR {stop - start}") diff --git a/tests/option_test.py b/tests/option_test.py index 66cc13746..89c5c52e0 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -1,235 +1,211 @@ -import unittest import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class import locale -class OptionTest(ProbackupTest, unittest.TestCase): + +class OptionTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure def test_help_1(self): """help options""" - with open(os.path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out: + with open(os.path.join(self.tests_source_path, "expected/option_help.out"), "rb") as help_out: self.assertEqual( - self.run_pb(["--help"]), + self.pb.run(["--help"], use_backup_dir=None), help_out.read().decode("utf-8") ) - # @unittest.skip("skip") - def test_version_2(self): - """help options""" - with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: - self.assertIn( - version_out.read().decode("utf-8").strip(), - self.run_pb(["--version"]) - ) - # @unittest.skip("skip") def test_without_backup_path_3(self): """backup command failure without backup mode option""" - try: - self.run_pb(["backup", "-b", "full"]) - self.assertEqual(1, 0, "Expecting Error because '-B' parameter is not specified.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: No backup catalog path specified.\n' + \ - 'Please specify it either using environment variable BACKUP_PATH or\n' + \ - 'command line option --backup-path (-B)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.run(["backup", "-b", "full"], + expect_error="because '-B' parameter is not specified", use_backup_dir=None) + self.assertMessage(contains="No backup catalog path specified.\n" + "Please specify it either using environment variable BACKUP_DIR or\n" + "command line option --backup-path (-B)") - # @unittest.skip("skip") def test_options_4(self): """check options test""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) # backup command failure without instance option - try: - self.run_pb(["backup", "-B", backup_dir, "-D", node.data_dir, "-b", "full"]) - self.assertEqual(1, 0, "Expecting Error because 'instance' parameter is not specified.\n Output: {0} \n 
CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Required parameter not specified: --instance', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.run(["backup", "-D", node.data_dir, "-b", "full"], + expect_error="because 'instance' parameter is not specified") + self.assertMessage(contains='ERROR: Required parameter not specified: --instance') # backup command failure without backup mode option - try: - self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-D", node.data_dir]) - self.assertEqual(1, 0, "Expecting Error because '-b' parameter is not specified.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: No backup mode specified.\nPlease specify it either using environment variable BACKUP_MODE or\ncommand line option --backup-mode (-b)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.run(["backup", "--instance=node", "-D", node.data_dir], + expect_error="Expecting Error because '-b' parameter is not specified") + self.assertMessage(contains="Please specify it either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)") # backup command failure with invalid backup mode option - try: - self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-b", "bad"]) - self.assertEqual(1, 0, "Expecting Error because backup-mode parameter is invalid.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid backup-mode "bad"', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.run(["backup", "--instance=node", "-b", "bad"], + expect_error="because backup-mode parameter is invalid") + self.assertMessage(contains='ERROR: Invalid backup-mode "bad"') # delete failure without delete options - try: - self.run_pb(["delete", "-B", backup_dir, "--instance=node"]) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because delete options are omitted.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You must specify at least one of the delete options: ' - '--delete-expired |--delete-wal |--merge-expired |--status |(-i, --backup-id)', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - + self.pb.run(["delete", "--instance=node"], + expect_error="because delete options are omitted") + self.assertMessage(contains='ERROR: You must specify at least one of the delete options: ' + '--delete-expired |--delete-wal |--merge-expired |--status |(-i, --backup-id)') # delete failure without ID - try: - self.run_pb(["delete", "-B", backup_dir, "--instance=node", '-i']) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because backup ID is omitted.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "Option '-i' requires an argument", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.run(["delete", "--instance=node", '-i'], + expect_error="because backup ID is omitted") + self.assertMessage(contains="Option '-i' requires an argument") + + 
#init command with bad option + self.pb.run(["init","--bad"], + expect_error="because unknown option") + self.assertMessage(contains="Unknown option '--bad'") + + # run with bad short option + self.pb.run(["init","-aB"], + expect_error="because unknown option") + self.assertMessage(contains="Unknown option '-aB'") # @unittest.skip("skip") def test_options_5(self): """check options test""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - output = self.init_pb(backup_dir) - self.assertIn(f"INFO: Backup catalog '{backup_dir}' successfully initialized", output) + output = self.pb.init() + self.assertIn( + f"INFO: Backup catalog '{backup_dir}' successfully initialized", + output) - self.add_instance(backup_dir, 'node', node) + self.pb.add_instance('node', node) node.slow_start() # syntax error in pg_probackup.conf - conf_file = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf") - with open(conf_file, "a") as conf: - conf.write(" = INFINITE\n") - try: - self.backup_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because of garbage in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Syntax error in " = INFINITE', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += " = INFINITE\n" + + self.pb.backup_node('node', node, + expect_error="because of garbage in pg_probackup.conf") + self.assertMessage(regex=r'ERROR: Syntax error .* INFINITE') + + backup_dir.cleanup() + self.pb.init() + self.pb.add_instance('node', node) # invalid value in pg_probackup.conf - with open(conf_file, "a") as conf: - conf.write("BACKUP_MODE=\n") - - try: - self.backup_node(backup_dir, 'node', node, backup_type=None), - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because of invalid backup-mode in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid option "BACKUP_MODE" in file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "BACKUP_MODE=\n" + + self.pb.backup_node('node', node, backup_type=None, + expect_error="because of invalid backup-mode in pg_probackup.conf") + self.assertMessage(contains='ERROR: Invalid option "BACKUP_MODE" in file') + + backup_dir.cleanup() + self.pb.init() + self.pb.add_instance('node', node) # Command line parameters should override file values - with open(conf_file, "a") as conf: - conf.write("retention-redundancy=1\n") + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "retention-redundancy=1\n" - self.assertEqual(self.show_config(backup_dir, 'node')['retention-redundancy'], '1') + self.assertEqual(self.pb.show_config('node')['retention-redundancy'], '1') # User cannot send 
--system-identifier parameter via command line - try: - self.backup_node(backup_dir, 'node', node, options=["--system-identifier", "123"]), - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because option system-identifier cannot be specified in command line.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Option system-identifier cannot be specified in command line', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.backup_node('node', node, options=["--system-identifier", "123"], + expect_error="because option system-identifier cannot be specified in command line") + self.assertMessage(contains='ERROR: Option system-identifier cannot be specified in command line') # invalid value in pg_probackup.conf - with open(conf_file, "a") as conf: - conf.write("SMOOTH_CHECKPOINT=FOO\n") - - try: - self.backup_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, "Expecting Error because option -C should be boolean.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid option "SMOOTH_CHECKPOINT" in file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.clean_pb(backup_dir) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "SMOOTH_CHECKPOINT=FOO\n" + + self.pb.backup_node('node', node, + expect_error="because option smooth-checkpoint could be specified in command-line only") + self.assertMessage(contains='ERROR: Invalid option "SMOOTH_CHECKPOINT" in file') + + backup_dir.cleanup() + self.pb.init() + self.pb.add_instance('node', node) # invalid option in pg_probackup.conf - with open(conf_file, "a") as conf: - conf.write("TIMELINEID=1\n") - - try: - self.backup_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual(1, 0, 'Expecting Error because of invalid option "TIMELINEID".\n Output: {0} \n CMD: {1}'.format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid option "TIMELINEID" in file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "TIMELINEID=1\n" + + self.pb.backup_node('node', node, + expect_error='because of invalid option "TIMELINEID"') + self.assertMessage(contains='ERROR: Invalid option "TIMELINEID" in file') # @unittest.skip("skip") def test_help_6(self): """help options""" if ProbackupTest.enable_nls: - if check_locale('ru_RU.utf-8'): - self.test_env['LC_ALL'] = 'ru_RU.utf-8' - with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + if check_locale('ru_RU.UTF-8'): + env = self.test_env.copy() + env['LC_CTYPE'] = 'ru_RU.UTF-8' + env['LC_MESSAGES'] = 'ru_RU.UTF-8' + env['LANGUAGE'] = 'ru_RU' + with open(os.path.join(self.tests_source_path, "expected/option_help_ru.out"), "rb") as help_out: self.assertEqual( - self.run_pb(["--help"]), + self.pb.run(["--help"], env=env, use_backup_dir=None), help_out.read().decode("utf-8") ) else: self.skipTest( - "Locale ru_RU.utf-8 doesn't work. 
You need install ru_RU.utf-8 locale for this test") + "The ru_RU.UTF-8 locale is not available. You may need to install it to run this test.") else: self.skipTest( 'You need configure PostgreSQL with --enabled-nls option for this test') + def test_skip_if_exists(self): + """check options --skip-if-exists""" + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.assertMessage(contains=f"INFO: Backup catalog '{backup_dir}' successfully initialized") + if fs_backup_class.is_file_based: + self.pb.init(expect_error=True) + self.assertMessage(contains=f"ERROR: Backup catalog '{backup_dir}' already exists and is not empty") + + self.pb.init(options=['--skip-if-exists']) + self.assertMessage(contains=f"WARNING: Backup catalog '{backup_dir}' already exists and is not empty, skipping") + self.assertMessage(has_no="successfully initialized") + + self.pb.add_instance('node', node) + self.assertMessage(contains="INFO: Instance 'node' successfully initialized") + self.pb.add_instance('node', node, expect_error=True) + self.assertMessage(contains="ERROR: Instance 'node' backup directory already exists") + + self.pb.add_instance('node', node, options=['--skip-if-exists']) + self.assertMessage(contains=f"WARNING: Instance 'node' backup directory already exists: '{backup_dir}/backups/node'. Skipping") + self.assertMessage(has_no="successfully initialized") + + # @unittest.skip("skip") + def test_options_no_scale_units(self): + """check --no-scale-units option""" + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + self.pb.init() + self.pb.add_instance('node', node) + + # check that --no-scale-units option works correctly + self.pb.run(["show-config", "-D", node.data_dir, "--instance", "node"]) + self.assertMessage(contains="archive-timeout = 5min") + self.pb.run(["show-config", "-D", node.data_dir, "--instance", "node", "--no-scale-units"]) + self.assertMessage(has_no="archive-timeout = 300s") + self.assertMessage(contains="archive-timeout = 300") + # check that there are no quotes ("") around numeric values in json output + self.pb.run(["show-config", "--instance", "node", "--no-scale-units", "--format=json"]) + self.assertMessage(contains='"archive-timeout": 300,') + self.assertMessage(contains='"retention-redundancy": 0,') + self.assertMessage(has_no='"archive-timeout": "300",') + + + def check_locale(locale_name): ret=True diff --git a/tests/page_test.py b/tests/page_test.py index 99f3ce992..10959bd8b 100644 --- a/tests/page_test.py +++ b/tests/page_test.py @@ -1,14 +1,9 @@ -import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest from testgres import QueryException -from datetime import datetime, timedelta -import subprocess -import gzip -import shutil import time -class PageTest(ProbackupTest, unittest.TestCase): +class PageTest(ProbackupTest): # @unittest.skip("skip") def test_basic_page_vacuum_truncate(self): @@ -18,20 +13,16 @@ def test_basic_page_vacuum_truncate(self): take page backup, take second page backup, restore last page backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, 
self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node_restored.cleanup() node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -48,7 +39,7 @@ def test_basic_page_vacuum_truncate(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # TODO: make it dynamic node.safe_psql( @@ -58,11 +49,9 @@ def test_basic_page_vacuum_truncate(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) @@ -70,8 +59,7 @@ def test_basic_page_vacuum_truncate(self): old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format(old_tablespace, new_tablespace)]) @@ -81,7 +69,7 @@ def test_basic_page_vacuum_truncate(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # Logical comparison @@ -98,15 +86,12 @@ def test_page_vacuum_truncate_1(self): take page backup, insert some data, take second page backup and check data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -121,7 +106,7 @@ def test_page_vacuum_truncate_1(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -131,8 +116,7 @@ def test_page_vacuum_truncate_1(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') node.safe_psql( "postgres", @@ -141,22 +125,20 @@ def test_page_vacuum_truncate_1(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,1) i") - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) # Physical comparison pgdata_restored = self.pgdata_content(node_restored.data_dir) 
self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -166,18 +148,15 @@ def test_page_stream(self): restore them and check data correctness """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'} ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -188,8 +167,7 @@ def test_page_stream(self): "from generate_series(0,100) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, + full_backup_id = self.pb.backup_node('node', node, backup_type='full', options=['--stream']) # PAGE BACKUP @@ -199,8 +177,7 @@ def test_page_stream(self): "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") page_result = node.table_checksum("t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, + page_backup_id = self.pb.backup_node('node', node, backup_type='page', options=['--stream', '-j', '4']) if self.paranoia: @@ -210,13 +187,9 @@ def test_page_stream(self): node.cleanup() # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=full_backup_id, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, + backup_id=full_backup_id, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") @@ -224,13 +197,9 @@ def test_page_stream(self): node.cleanup() # Check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, - backup_id=page_backup_id, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, + backup_id=page_backup_id, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(page_backup_id)) # GET RESTORED PGDATA AND COMPARE if self.paranoia: @@ -249,18 +218,15 @@ def test_page_archive(self): restore them and check data correctness """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'} ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -269,8 +235,7 @@ def test_page_archive(self): 
"create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='full') + full_backup_id = self.pb.backup_node('node', node, backup_type='full') # PAGE BACKUP node.safe_psql( @@ -279,8 +244,7 @@ def test_page_archive(self): "md5(i::text) as text, md5(i::text)::tsvector as tsvector " "from generate_series(100, 200) i") page_result = node.table_checksum("t_heap") - page_backup_id = self.backup_node( - backup_dir, 'node', node, + page_backup_id = self.pb.backup_node('node', node, backup_type='page', options=["-j", "4"]) if self.paranoia: @@ -290,18 +254,13 @@ def test_page_archive(self): node.cleanup() # Restore and check full backup - self.assertIn("INFO: Restore of backup {0} completed.".format( - full_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") @@ -309,19 +268,15 @@ def test_page_archive(self): node.cleanup() # Restore and check page backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(page_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=page_backup_id, options=[ "-j", "4", "--immediate", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(page_backup_id)) - # GET RESTORED PGDATA AND COMPARE + # GET RESTORED PGDATA AND COMPARE if self.paranoia: pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -338,20 +293,17 @@ def test_page_multiple_segments(self): Make node, create table with multiple segments, write some data to it, check page and data correctness """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'fsync': 'off', 'shared_buffers': '1GB', 'maintenance_work_mem': '1GB', 'full_page_writes': 'off'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -359,7 +311,7 @@ def test_page_multiple_segments(self): # CREATE TABLE node.pgbench_init(scale=100, options=['--tablespace=somedata']) # FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # PGBENCH STUFF pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum']) @@ -368,21 +320,19 @@ def test_page_multiple_segments(self): # GET LOGICAL CONTENT FROM NODE result = node.table_checksum("pgbench_accounts") # PAGE BACKUP - 
self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # GET PHYSICAL CONTENT FROM NODE pgdata = self.pgdata_content(node.data_dir) # RESTORE NODE - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) + restored_node = self.pg_node.make_simple('restored_node') restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') - self.restore_node( - backup_dir, 'node', restored_node, + self.pb.restore_node('node', restored_node, options=[ "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) @@ -391,7 +341,7 @@ def test_page_multiple_segments(self): pgdata_restored = self.pgdata_content(restored_node.data_dir) # START RESTORED NODE - self.set_auto_conf(restored_node, {'port': restored_node.port}) + restored_node.set_auto_conf({'port': restored_node.port}) restored_node.slow_start() result_new = restored_node.table_checksum("pgbench_accounts") @@ -409,23 +359,21 @@ def test_page_delete(self): delete everything from table, vacuum table, take page backup, restore page backup, compare . """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', + set_replication=True, pg_options={ 'checkpoint_timeout': '30s', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", "create table t_heap tablespace somedata as select i as id," @@ -441,18 +389,15 @@ def test_page_delete(self): "vacuum t_heap") # PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -467,7 +412,7 @@ def test_page_delete(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") @@ -477,19 +422,16 @@ def test_page_delete_1(self): delete everything from table, vacuum table, take page backup, restore page backup, compare . 
""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -501,7 +443,7 @@ def test_page_delete_1(self): " from generate_series(0,100) i" ) # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -514,19 +456,16 @@ def test_page_delete_1(self): ) # PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored') + node_restored = self.pg_node.make_simple('node_restored' ) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -541,36 +480,31 @@ def test_page_delete_1(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() def test_parallel_pagemap(self): """ Test for parallel WAL segments reading, during which pagemap is built """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={ "hot_standby": "on" } ) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), + node_restored = self.pg_node.make_simple('node_restored', ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node_restored.cleanup() - self.set_archiving(backup_dir, 'node', node) + self.pb.set_archiving('node', node) node.slow_start() # Do full backup - self.backup_node(backup_dir, 'node', node) - show_backup = self.show_pb(backup_dir, 'node')[0] + self.pb.backup_node('node', node) + show_backup = self.pb.show('node')[0] self.assertEqual(show_backup['status'], "OK") self.assertEqual(show_backup['backup-mode'], "FULL") @@ -586,9 +520,8 @@ def test_parallel_pagemap(self): count1 = conn.execute("select count(*) from test") # ... 
and do page backup with parallel pagemap - self.backup_node( - backup_dir, 'node', node, backup_type="page", options=["-j", "4"]) - show_backup = self.show_pb(backup_dir, 'node')[1] + self.pb.backup_node('node', node, backup_type="page", options=["-j", "4"]) + show_backup = self.pb.show('node')[1] self.assertEqual(show_backup['status'], "OK") self.assertEqual(show_backup['backup-mode'], "PAGE") @@ -597,14 +530,14 @@ def test_parallel_pagemap(self): pgdata = self.pgdata_content(node.data_dir) # Restore it - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) # Physical comparison if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # Check restored node @@ -620,23 +553,19 @@ def test_parallel_pagemap_1(self): """ Test for parallel WAL segments reading, during which pagemap is built """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - # Initialize instance and backup directory - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={} ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Do full backup - self.backup_node(backup_dir, 'node', node) - show_backup = self.show_pb(backup_dir, 'node')[0] + self.pb.backup_node('node', node) + show_backup = self.pb.show('node')[0] self.assertEqual(show_backup['status'], "OK") self.assertEqual(show_backup['backup-mode'], "FULL") @@ -645,22 +574,20 @@ def test_parallel_pagemap_1(self): node.pgbench_init(scale=10) # do page backup in single thread - page_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") + page_id = self.pb.backup_node('node', node, backup_type="page") - self.delete_pb(backup_dir, 'node', page_id) + self.pb.delete('node', page_id) # ... 
and do page backup with parallel pagemap - self.backup_node( - backup_dir, 'node', node, backup_type="page", options=["-j", "4"]) - show_backup = self.show_pb(backup_dir, 'node')[1] + self.pb.backup_node('node', node, backup_type="page", options=["-j", "4"]) + show_backup = self.pb.show('node')[1] self.assertEqual(show_backup['status'], "OK") self.assertEqual(show_backup['backup-mode'], "PAGE") # Drop node and restore it node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() # Clean after yourself @@ -675,73 +602,45 @@ def test_page_backup_with_lost_wal_segment(self): run page backup, expecting error because of missing wal segment make sure that backup status is 'ERROR' """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # make some wals node.pgbench_init(scale=3) # delete last wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( - wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] - wals = map(str, wals) - file = os.path.join(wals_dir, max(wals)) - os.remove(file) - if self.archive_compress: - file = file[:-3] + walfile = '000000010000000000000004'+self.compress_suffix + self.wait_instance_wal_exists(backup_dir, 'node', walfile) + self.remove_instance_wal(backup_dir, 'node', walfile) # Single-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'is absent' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + expect_error="because of wal segment disappearance") + self.assertMessage(contains='Could not read WAL record at') + self.assertMessage(contains='is absent') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'Backup {0} should have STATUS "ERROR"') # Multi-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'is absent' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + options=["-j", "4"], + expect_error="because of wal segment disappearance") + self.assertMessage(contains='Could not read WAL record at') + self.assertMessage(contains='is absent') self.assertEqual( 'ERROR', - 
self.show_pb(backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') # @unittest.skip("skip") @@ -753,102 +652,44 @@ def test_page_backup_with_corrupted_wal_segment(self): run page backup, expecting error because of missing wal segment make sure that backup status is 'ERROR' """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # make some wals node.pgbench_init(scale=10) # delete last wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( - wals_dir, f)) and not f.endswith('.backup')] - wals = map(str, wals) - # file = os.path.join(wals_dir, max(wals)) - - if self.archive_compress: - original_file = os.path.join(wals_dir, '000000010000000000000004.gz') - tmp_file = os.path.join(backup_dir, '000000010000000000000004') - - with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - # drop healthy file - os.remove(original_file) - file = tmp_file - - else: - file = os.path.join(wals_dir, '000000010000000000000004') - - # corrupt file - print(file) - with open(file, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close - - if self.archive_compress: - # compress corrupted file and replace with it old file - with open(file, 'rb') as f_in, gzip.open(original_file, 'wb', compresslevel=1) as f_out: - shutil.copyfileobj(f_in, f_out) - - file = os.path.join(wals_dir, '000000010000000000000004.gz') - - #if self.archive_compress: - # file = file[:-3] + file = '000000010000000000000004' + self.compress_suffix + self.wait_instance_wal_exists(backup_dir, 'node', file) + self.corrupt_instance_wal(backup_dir, 'node', file, 42, b"blah", + decompressed=self.archive_compress) # Single-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'Possible WAL corruption. Error has occured during reading WAL segment' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + expect_error="because of wal segment disappearance") + self.assertMessage(contains='Could not read WAL record at') + self.assertMessage(contains='Possible WAL corruption. 
Error has occured during reading WAL segment') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'Backup {0} should have STATUS "ERROR"') # Multi-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format( - file) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + options=["-j", "4"], + expect_error="because of wal segment disappearance") self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') # @unittest.skip("skip") @@ -862,30 +703,24 @@ def test_page_backup_with_alien_wal_segment(self): """ make node, create table, take full backup make alien node, create table, take full alien backup take page backup from original node expecting error because of alien wal segment make sure that backup status is 'ERROR' """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - alien_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'alien_node'), - set_replication=True, - initdb_params=['--data-checksums']) + alien_node = self.pg_node.make_simple('alien_node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.add_instance(backup_dir, 'alien_node', alien_node) - self.set_archiving(backup_dir, 'alien_node', alien_node) + self.pb.add_instance('alien_node', alien_node) + self.pb.set_archiving('alien_node', alien_node) alien_node.slow_start() - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - self.backup_node( - backup_dir, 'alien_node', alien_node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) + self.pb.backup_node('alien_node', alien_node, options=['--stream']) # make some wals node.safe_psql( @@ -909,63 +744,38 @@ def test_page_backup_with_alien_wal_segment(self): "from generate_series(0,10000) i;") # copy latest wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'alien_node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( - wals_dir, f)) and not f.endswith('.backup')] - wals = map(str, wals) + wals = self.get_instance_wal_list(backup_dir, 'alien_node') filename = max(wals) - file = os.path.join(wals_dir, filename) - file_destination = os.path.join( - os.path.join(backup_dir, 'wal', 'node'), filename) - start = time.time() - while not os.path.exists(file_destination) and time.time() - start < 20: - time.sleep(0.1) - os.remove(file_destination) - os.rename(file, file_destination) + # wait until `node` has archived the same file + self.wait_instance_wal_exists(backup_dir, 'node', filename) + file_content = self.read_instance_wal(backup_dir, 'alien_node', filename) + 
self.write_instance_wal(backup_dir, 'node', filename, file_content) # Single-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because of alien wal segment.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'Could not read WAL record at' in e.message and - 'Possible WAL corruption. Error has occured during reading WAL segment' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + expect_error="because of alien wal segment") + self.assertMessage(contains='Could not read WAL record at') + self.assertMessage(contains='Possible WAL corruption. Error has occured during reading WAL segment') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'Backup {0} should have STATUS "ERROR"') # Multi-thread PAGE backup - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of alien wal segment.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn('Could not read WAL record at', e.message) - self.assertIn('WAL file is from different database system: ' - 'WAL file database system identifier is', e.message) - self.assertIn('pg_control database system identifier is', e.message) - self.assertIn('Possible WAL corruption. Error has occured ' - 'during reading WAL segment', e.message) + self.pb.backup_node('node', node, backup_type='page', + options=["-j", "4"], + expect_error="because of alien wal segment") + self.assertMessage(contains='Could not read WAL record at') + self.assertMessage(contains='WAL file is from different database system: ' + 'WAL file database system identifier is') + self.assertMessage(contains='pg_control database system identifier is') + self.assertMessage(contains='Possible WAL corruption. 
Error has occured ' + 'during reading WAL segment') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') # @unittest.skip("skip") @@ -973,17 +783,14 @@ def test_multithread_page_backup_with_toast(self): """ make node, create toast, do multithread PAGE backup """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # make some wals node.safe_psql( @@ -993,8 +800,7 @@ def test_multithread_page_backup_with_toast(self): "from generate_series(0,70) i") # Multi-thread PAGE backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', options=["-j", "4"]) # @unittest.skip("skip") @@ -1004,20 +810,17 @@ def test_page_create_db(self): restore database and check it presense """ self.maxDiff = None - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '10GB', 'checkpoint_timeout': '5min', } ) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL BACKUP @@ -1026,8 +829,7 @@ def test_page_create_db(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - self.backup_node( - backup_dir, 'node', node) + self.pb.backup_node('node', node) # CREATE DATABASE DB1 node.safe_psql("postgres", "create database db1") @@ -1037,18 +839,16 @@ def test_page_create_db(self): "md5(i::text)::tsvector as tsvector from generate_series(0,1000) i") # PAGE BACKUP - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, backup_id=backup_id, options=["-j", "4"]) # COMPARE PHYSICAL CONTENT @@ -1057,7 +857,7 @@ def test_page_create_db(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() node_restored.safe_psql('db1', 'select 1') @@ -1067,15 +867,13 @@ def test_page_create_db(self): node.safe_psql( "postgres", "drop database db1") # SECOND PAGE BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + 
backup_id = self.pb.backup_node('node', node, backup_type='page') if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # RESTORE SECOND PAGE BACKUP - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, backup_id=backup_id, options=["-j", "4"] ) @@ -1086,24 +884,11 @@ def test_page_create_db(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() - try: - node_restored.safe_psql('db1', 'select 1') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are connecting to deleted database" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except QueryException as e: - self.assertTrue( - 'FATAL: database "db1" does not exist' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd) - ) + error_result = node_restored.safe_psql('db1', 'select 1', expect_error=True) + self.assertMessage(error_result, contains='FATAL: database "db1" does not exist') # @unittest.skip("skip") # @unittest.expectedFailure @@ -1119,15 +904,12 @@ def test_multi_timeline_page(self): P must have F as parent """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql("postgres", "create extension pageinspect") @@ -1142,16 +924,15 @@ def test_multi_timeline_page(self): "create extension amcheck_next") node.pgbench_init(scale=20) - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=full_id, + self.pb.restore_node('node', node, backup_id=full_id, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) @@ -1164,8 +945,7 @@ def test_multi_timeline_page(self): # create timelines for i in range(2, 7): node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target=latest', '--recovery-target-action=promote', @@ -1178,24 +958,22 @@ def test_multi_timeline_page(self): # create backup at 2, 4 and 6 timeline if i % 2 == 0: - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', + page_id = self.pb.backup_node('node', node, backup_type='page', options=['--log-level-file=VERBOSE']) pgdata = self.pgdata_content(node.data_dir) result = node.table_checksum("pgbench_accounts") - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = 
self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() result_new = node_restored.table_checksum("pgbench_accounts") @@ -1204,21 +982,21 @@ def test_multi_timeline_page(self): self.compare_pgdata(pgdata, pgdata_restored) - self.checkdb_node( - backup_dir, - 'node', + self.pb.checkdb_node( + use_backup_dir=True, + instance='node', options=[ '--amcheck', '-d', 'postgres', '-p', str(node.port)]) - self.checkdb_node( - backup_dir, - 'node', + self.pb.checkdb_node( + use_backup_dir=True, + instance='node', options=[ '--amcheck', '-d', 'postgres', '-p', str(node_restored.port)]) - backup_list = self.show_pb(backup_dir, 'node') + backup_list = self.pb.show('node') self.assertEqual( backup_list[2]['parent-backup-id'], @@ -1251,16 +1029,13 @@ def test_multitimeline_page_1(self): P must have F as parent """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql("postgres", "create extension pageinspect") @@ -1275,21 +1050,20 @@ def test_multitimeline_page_1(self): "create extension amcheck_next") node.pgbench_init(scale=20) - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '20', '-c', '1']) pgbench.wait() - page1 = self.backup_node(backup_dir, 'node', node, backup_type='page') + page1 = self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - page1 = self.backup_node(backup_dir, 'node', node, backup_type='delta') + page1 = self.pb.backup_node('node', node, backup_type='delta') node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=page1, + self.pb.restore_node('node', node, backup_id=page1, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) @@ -1299,20 +1073,18 @@ def test_multitimeline_page_1(self): pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum']) pgbench.wait() - print(self.backup_node( - backup_dir, 'node', node, backup_type='page', + print(self.pb.backup_node('node', node, backup_type='page', options=['--log-level-console=LOG'], return_id=False)) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() self.compare_pgdata(pgdata, 
pgdata_restored) @@ -1320,18 +1092,16 @@ def test_multitimeline_page_1(self): @unittest.skip("skip") # @unittest.expectedFailure def test_page_pg_resetxlog(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Create table @@ -1344,7 +1114,7 @@ def test_page_pg_resetxlog(self): # "from generate_series(0,25600) i") "from generate_series(0,2560) i") - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( 'postgres', @@ -1366,7 +1136,7 @@ def test_page_pg_resetxlog(self): pg_resetxlog_path = self.get_bin_path('pg_resetxlog') wal_dir = 'pg_xlog' - self.run_binary( + self.pb.run_binary( [ pg_resetxlog_path, '-D', @@ -1383,35 +1153,72 @@ def test_page_pg_resetxlog(self): exit(1) # take ptrack backup -# self.backup_node( -# backup_dir, 'node', node, +# self.pb.backup_node( +# 'node', node, # backup_type='page', options=['--stream']) - try: - self.backup_node( - backup_dir, 'node', node, backup_type='page') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance was brutalized by pg_resetxlog" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertIn( - 'Insert error message', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='page', + expect_error="because instance was brutalized by pg_resetxlog") + self.assertMessage(contains='Insert error message') # pgdata = self.pgdata_content(node.data_dir) # -# node_restored = self.make_simple_node( -# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) +# node_restored = self.pg_node.make_simple('node_restored') # node_restored.cleanup() # -# self.restore_node( -# backup_dir, 'node', node_restored) +# self.pb.restore_node( +# 'node', node_restored) # # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) + + def test_page_huge_xlog_record(self): + node = self.pg_node.make_simple('node', + set_replication=True, + pg_options={ + 'max_locks_per_transaction': '1000', + 'work_mem': '100MB', + 'temp_buffers': '100MB', + 'wal_buffers': '128MB', + 'wal_level' : 'logical', + }) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Do full backup + self.pb.backup_node('node', node, backup_type='full') + + show_backup = self.pb.show('node')[0] + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "FULL") + + # Originally client had the problem at the transaction that (supposedly) + # deletes a lot of temporary tables (probably it was client disconnect). + # It generated ~40MB COMMIT WAL record. + # + # `pg_logical_emit_message` is much simpler and faster way to generate + # such huge record. 
+ node.safe_psql( + "postgres", + "select pg_logical_emit_message(False, 'z', repeat('o', 60*1000*1000))") + + # Do page backup with 1 thread + backup_id = self.pb.backup_node('node', node, backup_type='page', options=['-j', '1']) + + show_backup = self.pb.show('node')[1] + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") + + self.pb.delete('node', backup_id) + + # Repeat backup with multiple threads + self.pb.backup_node('node', node, backup_type='page', options=['-j', '10']) + + show_backup = self.pb.show('node')[1] + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py index 04f0eb6fa..e69de29bb 100644 --- a/tests/pgpro2068_test.py +++ b/tests/pgpro2068_test.py @@ -1,171 +0,0 @@ -import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess -from time import sleep -import shutil -import signal -from testgres import ProcessType - - -class BugTest(ProbackupTest, unittest.TestCase): - - def test_minrecpoint_on_replica(self): - """ - https://jira.postgrespro.ru/browse/PGPRO-2068 - """ - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ - # 'checkpoint_timeout': '60min', - 'checkpoint_completion_target': '0.9', - 'bgwriter_delay': '10ms', - 'bgwriter_lru_maxpages': '1000', - 'bgwriter_lru_multiplier': '4.0', - 'max_wal_size': '256MB'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # take full backup and restore it as replica - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - # start replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'node', replica, options=['-R']) - self.set_replica(node, replica) - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - self.set_auto_conf( - replica, - {'port': replica.port, 'restart_after_crash': 'off'}) - - node.safe_psql( - "postgres", - "CREATE EXTENSION pageinspect") - - replica.slow_start(replica=True) - - # generate some data - node.pgbench_init(scale=10) - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-T", "20"]) - pgbench.wait() - pgbench.stdout.close() - - # generate some more data and leave it in background - pgbench = node.pgbench( - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=["-c", "4", "-j 4", "-T", "100"]) - - # wait for shared buffer on replica to be filled with dirty data - sleep(20) - - # get pids of replica background workers - startup_pid = replica.auxiliary_pids[ProcessType.Startup][0] - checkpointer_pid = replica.auxiliary_pids[ProcessType.Checkpointer][0] - - # break checkpointer on UpdateLastRemovedPtr - gdb_checkpointer = self.gdb_attach(checkpointer_pid) - gdb_checkpointer._execute('handle SIGINT noprint nostop pass') - gdb_checkpointer._execute('handle SIGUSR1 noprint nostop pass') - gdb_checkpointer.set_breakpoint('UpdateLastRemovedPtr') - 
gdb_checkpointer.continue_execution_until_break() - - # break recovery on UpdateControlFile - gdb_recovery = self.gdb_attach(startup_pid) - gdb_recovery._execute('handle SIGINT noprint nostop pass') - gdb_recovery._execute('handle SIGUSR1 noprint nostop pass') - gdb_recovery.set_breakpoint('UpdateMinRecoveryPoint') - gdb_recovery.continue_execution_until_break() - gdb_recovery.set_breakpoint('UpdateControlFile') - gdb_recovery.continue_execution_until_break() - - # stop data generation - pgbench.wait() - pgbench.stdout.close() - - # kill someone, we need a crash - replica.kill(someone=ProcessType.BackgroundWriter) - gdb_recovery._execute('detach') - gdb_checkpointer._execute('detach') - - # just to be sure - try: - replica.stop(['-m', 'immediate', '-D', replica.data_dir]) - except: - pass - - # MinRecLSN = replica.get_control_data()['Minimum recovery ending location'] - - # Promote replica with 'immediate' target action - if self.get_version(replica) >= self.version_to_num('12.0'): - recovery_config = 'postgresql.auto.conf' - else: - recovery_config = 'recovery.conf' - - replica.append_conf( - recovery_config, "recovery_target = 'immediate'") - replica.append_conf( - recovery_config, "recovery_target_action = 'pause'") - replica.slow_start(replica=True) - - current_xlog_lsn_query = 'SELECT pg_last_wal_replay_lsn() INTO current_xlog_lsn' - if self.get_version(node) < 100000: - current_xlog_lsn_query = 'SELECT min_recovery_end_location INTO current_xlog_lsn FROM pg_control_recovery()' - - script = f''' -DO -$$ -DECLARE - roid oid; - current_xlog_lsn pg_lsn; - pages_from_future RECORD; - found_corruption bool := false; -BEGIN - {current_xlog_lsn_query}; - RAISE NOTICE 'CURRENT LSN: %', current_xlog_lsn; - FOR roid IN select oid from pg_class class where relkind IN ('r', 'i', 't', 'm') and relpersistence = 'p' LOOP - FOR pages_from_future IN - with number_of_blocks as (select blknum from generate_series(0, pg_relation_size(roid) / 8192 -1) as blknum ) - select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid - from number_of_blocks, page_header(get_raw_page(roid::regclass::text, number_of_blocks.blknum::int)) - where lsn > current_xlog_lsn LOOP - RAISE NOTICE 'Found page from future. 
OID: %, BLKNUM: %, LSN: %', roid, pages_from_future.blknum, pages_from_future.lsn; - found_corruption := true; - END LOOP; - END LOOP; - IF found_corruption THEN - RAISE 'Found Corruption'; - END IF; -END; -$$ LANGUAGE plpgsql; -'''.format(current_xlog_lsn_query=current_xlog_lsn_query) - - # Find blocks from future - replica.safe_psql( - 'postgres', - script) - - # error is expected if version < 10.6 - # gdb_backup.continue_execution_until_exit() - - # do basebackup - - # do pg_probackup, expect error diff --git a/tests/pgpro560_test.py b/tests/pgpro560_test.py index b665fd200..2a9548670 100644 --- a/tests/pgpro560_test.py +++ b/tests/pgpro560_test.py @@ -1,12 +1,12 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest from datetime import datetime, timedelta import subprocess from time import sleep -class CheckSystemID(ProbackupTest, unittest.TestCase): +class CheckSystemID(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure @@ -17,33 +17,20 @@ def test_pgpro560_control_file_loss(self): make backup check that backup failed """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() file = os.path.join(node.base_dir, 'data', 'global', 'pg_control') # Not delete this file permanently os.rename(file, os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy')) - try: - self.backup_node(backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because pg_control was deleted.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Could not open file' in e.message and - 'pg_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, options=['--stream'], + expect_error='because pg_control was deleted') + self.assertMessage(regex=r'ERROR: Could not get control file:.*pg_control') # Return this file to avoid Postger fail os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) @@ -55,69 +42,29 @@ def test_pgpro560_systemid_mismatch(self): feed to backup PGDATA from node1 and PGPORT from node2 check that backup failed """ - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) + node1 = self.pg_node.make_simple('node1', + set_replication=True) node1.slow_start() - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2'), - set_replication=True, - initdb_params=['--data-checksums']) + node2 = self.pg_node.make_simple('node2', + set_replication=True) node2.slow_start() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node1', node1) + self.pb.init() + self.pb.add_instance('node1', node1) - try: - self.backup_node(backup_dir, 'node1', node2, options=['--stream']) - # we should die 
here because exception is what we expect to happen
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of SYSTEM ID mismatch.\n "
-                "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
-        except ProbackupException as e:
-            if self.get_version(node1) > 90600:
-                self.assertTrue(
-                    'ERROR: Backup data directory was '
-                    'initialized for system id' in e.message and
-                    'but connected instance system id is' in e.message,
-                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                        repr(e.message), self.cmd))
-            else:
-                self.assertIn(
-                    'ERROR: System identifier mismatch. '
-                    'Connected PostgreSQL instance has system id',
-                    e.message,
-                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                        repr(e.message), self.cmd))
+        self.pb.backup_node('node1', node2, options=['--stream'],
+                            expect_error="because of SYSTEM ID mismatch")
+        self.assertMessage(contains='ERROR: Backup data directory was '
+                                    'initialized for system id')
+        self.assertMessage(contains='but connected instance system id is')
 
         sleep(1)
-        try:
-            self.backup_node(
-                backup_dir, 'node1', node2,
-                data_dir=node1.data_dir, options=['--stream'])
-            # we should die here because exception is what we expect to happen
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of of SYSTEM ID mismatch.\n "
-                "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
-        except ProbackupException as e:
-            if self.get_version(node1) > 90600:
-                self.assertTrue(
-                    'ERROR: Backup data directory was initialized '
-                    'for system id' in e.message and
-                    'but connected instance system id is' in e.message,
-                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                        repr(e.message), self.cmd))
-            else:
-                self.assertIn(
-                    'ERROR: System identifier mismatch. '
-                    'Connected PostgreSQL instance has system id',
-                    e.message,
-                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                        repr(e.message), self.cmd))
+        self.pb.backup_node('node1', node2,
+                            data_dir=node1.data_dir, options=['--stream'],
+                            expect_error="because of SYSTEM ID mismatch")
+        self.assertMessage(contains='ERROR: Backup data directory was '
+                                    'initialized for system id')
+        self.assertMessage(contains='but connected instance system id is')
diff --git a/tests/pgpro589_test.py b/tests/pgpro589_test.py
index 8ce8e1f56..e600f142a 100644
--- a/tests/pgpro589_test.py
+++ b/tests/pgpro589_test.py
@@ -1,11 +1,11 @@
 import os
 import unittest
-from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
+from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
 from datetime import datetime, timedelta
 import subprocess
 
 
-class ArchiveCheck(ProbackupTest, unittest.TestCase):
+class ArchiveCheck(ProbackupTest):
 
     def test_pgpro589(self):
         """
@@ -14,17 +14,15 @@ def test_pgpro589(self):
        check that backup status equal to ERROR
        check that no files where copied to backup catalogue
        """
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            initdb_params=['--data-checksums'])
+        node = self.pg_node.make_simple('node')
 
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
+        backup_dir = self.backup_dir
+        self.pb.init()
+        self.pb.add_instance('node', node)
+        self.pb.set_archiving('node', node)
 
         # make erroneous archive_command
-        self.set_auto_conf(node, {'archive_command': 'exit 0'})
+        node.set_auto_conf({'archive_command': 'exit 0'})
        node.slow_start()
 
        node.pgbench_init(scale=5)
@@ -40,27 +38,16 @@ def
test_pgpro589(self): "select pg_relation_filepath('pgbench_accounts')").rstrip().decode( "utf-8") - try: - self.backup_node( - backup_dir, 'node', node, - options=['--archive-timeout=10']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing archive wal " - "segment with start_lsn.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Wait for WAL segment' in e.message and - 'ERROR: WAL segment' in e.message and - 'could not be archived in 10 seconds' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=['--archive-timeout=10'], + expect_error="because of missing archive wal segment " + "with start_lsn") + self.assertMessage(contains='INFO: Wait for WAL segment') + self.assertMessage(regex='ERROR: WAL segment .* could not be archived in 10 seconds') - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + backup_id = self.pb.show('node')[0]['id'] self.assertEqual( - 'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'], + 'ERROR', self.pb.show('node', backup_id)['status'], 'Backup should have ERROR status') file = os.path.join( backup_dir, 'backups', 'node', diff --git a/tests/ptrack_load_test.py b/tests/ptrack_load_test.py new file mode 100644 index 000000000..9e96ab8b7 --- /dev/null +++ b/tests/ptrack_load_test.py @@ -0,0 +1,150 @@ +import os +from .helpers.ptrack_helpers import ProbackupTest + +PAGE_SIZE = 8192 +ZEROES = b"\x00" * PAGE_SIZE + + +class PtrackLoadTest(ProbackupTest): + def setUp(self): + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + def find_zero_pages(self, node, pagemapset, file_path): + """ + Find zero pages in a file using pagemapset. + + Args: + node (Node): The PostgreSQL node instance. + pagemapset (dict): The pagemapset obtained from fetch_ptrack. + file_path (str): Path to the file to analyze. + + Returns: + list: List of missed pages. + """ + missed_pages = [] + + if os.path.isfile(file_path): + rel_path = file_path.replace(f"{node.data_dir}/", "") + with open(file_path, "rb") as f: + bno = 0 + while True: + page_data = f.read(PAGE_SIZE) + if not page_data: + break + + if page_data == ZEROES: + if not self.check_ptrack(pagemapset, rel_path, bno): + print(f"Missed page: {rel_path}|{bno}") + missed_pages.append(f'{rel_path}|{bno}') + else: + print(f"Found page: {rel_path}|{bno}") + + bno += 1 + + return missed_pages + + @staticmethod + def fetch_ptrack(node, lsn): + """ + Fetch pagemapset using ptrack_get_pagemapset function. + + Args: + node (Node): The PostgreSQL node instance. + lsn (str): The LSN (Log Sequence Number). + + Returns: + dict: Dictionary containing pagemapset data. + """ + result_map = {} + ptrack_out = node.execute( + "postgres", + f"select (ptrack_get_pagemapset('{lsn}')).*;") + for row in ptrack_out: + path, pagecount, pagemap = row + result_map[path] = bytearray(pagemap) + return result_map + + def check_ptrack(self, page_map, file, bno): + """ + Check if the given block number has changes in pagemap. + + Args: + page_map (dict): Pagemapset data. + file (str): File name. + bno (int): Block number. + + Returns: + bool: True if changes are detected, False otherwise. 
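+
+        Example (illustrative only, restating the bit arithmetic used below):
+        for bno = 10 the byte index is 10 // 8 == 1 and the bit mask is
+        1 << (10 & 7) == 0b100, so block 10 counts as changed when
+        bits[1] & 0b100 is non-zero.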
+ """ + self.assertNotEqual(page_map, {}) + bits = page_map.get(file) + + if bits and bno // 8 < len(bits): + return (bits[bno // 8] & (1 << (bno & 7))) != 0 + else: + return False + + def test_load_ptrack_zero_pages(self): + """ + An error too many clients already for some clients is usual for this test + """ + pg_options = {'max_connections': 1024, + 'ptrack.map_size': 1024, + 'shared_buffers': '8GB', + 'checkpoint_timeout': '1d', + 'synchronous_commit': 'off', + 'fsync': 'off', + 'shared_preload_libraries': 'ptrack', + 'wal_buffers': '128MB', + 'wal_writer_delay': '5s', + 'wal_writer_flush_after': '16MB', + 'commit_delay': 100, + 'checkpoint_flush_after': '2MB', + 'max_wal_size': '10GB', + 'autovacuum': 'off'} + if self.pg_config_version >= 120000: + pg_options['wal_recycle'] = 'off' + + node = self.pg_node.make_simple('node', + set_replication=True, + ptrack_enable=self.ptrack, + ) + node.slow_start() + + start_lsn = node.execute( + "postgres", + "select pg_current_wal_lsn()")[0][0] + + self.pb.init() + self.pb.add_instance('node', node) + + node.execute( + "postgres", + "CREATE EXTENSION ptrack") + + # Initialize and start pgbench + node.pgbench_init(scale=100) + + pgbench = node.pgbench(options=['-T', '20', '-c', '150', '-j', '150']) + pgbench.wait() + + node.execute( + "postgres", + "CHECKPOINT;select txid_current();") + + missed_pages = [] + # Process each file in the data directory + for root, dirs, files in os.walk(node.data_dir): + # Process only the files in the 'global' and 'base' directories + if 'data/global' in root or 'data/base' in root or 'data/pg_tblspc' in root: + for file in files: + if file in ['ptrack.map']: + continue + file_path = os.path.join(root, file) + pagemapset = self.fetch_ptrack(node, start_lsn) + pages = self.find_zero_pages(node, pagemapset, file_path) + if pages: + missed_pages.extend(pages) + # Check that no missed pages + self.assertEqual(missed_pages, []) diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index 7b5bc416b..29eb2f11a 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -1,38 +1,33 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess -from testgres import QueryException, StartNodeException +from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack +from pg_probackup2.gdb import needs_gdb +from testgres import StartNodeException import shutil -import sys from time import sleep from threading import Thread -class PtrackTest(ProbackupTest, unittest.TestCase): +class PtrackTest(ProbackupTest): def setUp(self): - if self.pg_config_version < self.version_to_num('11.0'): - self.skipTest('You need PostgreSQL >= 11 for this test') - self.fname = self.id().split('.')[3] + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') # @unittest.skip("skip") + @needs_gdb def test_drop_rel_during_backup_ptrack(self): """ drop relation during ptrack backup """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + 
self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -51,11 +46,10 @@ def test_drop_rel_during_backup_ptrack(self): absolute_path = os.path.join(node.data_dir, relative_path) # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # PTRACK backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + gdb = self.pb.backup_node('node', node, backup_type='ptrack', gdb=True, options=['--log-level-file=LOG']) gdb.set_breakpoint('backup_files') @@ -69,14 +63,13 @@ def test_drop_rel_during_backup_ptrack(self): pgdata = self.pgdata_content(node.data_dir) - with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - self.assertTrue( + log_content = self.read_pb_log() + self.assertTrue( 'LOG: File not found: "{0}"'.format(absolute_path) in log_content, 'File "{0}" should be deleted but it`s not'.format(absolute_path)) node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) # Physical comparison pgdata_restored = self.pgdata_content(node.data_dir) @@ -85,67 +78,52 @@ def test_drop_rel_during_backup_ptrack(self): # @unittest.skip("skip") def test_ptrack_without_full(self): """ptrack backup without validated full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "CREATE EXTENSION ptrack") - try: - self.backup_node(backup_dir, 'node', node, backup_type="ptrack") - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type="ptrack", + expect_error="because page backup should not be " + "possible without valid full backup") + self.assertMessage(contains="WARNING: Valid full backup on current timeline 1 is not found") + self.assertMessage(contains="ERROR: Create new full backup before an incremental one") self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], "ERROR") # @unittest.skip("skip") def test_ptrack_threads(self): """ptrack multi thread backup mode""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - 
self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "CREATE EXTENSION ptrack") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="full", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + self.assertEqual(self.pb.show('node')[0]['status'], "OK") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type="ptrack", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + self.assertEqual(self.pb.show('node')[0]['status'], "OK") # @unittest.skip("skip") def test_ptrack_stop_pg(self): @@ -154,15 +132,13 @@ def test_ptrack_stop_pg(self): restart node, check that ptrack backup can be taken """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -172,13 +148,12 @@ def test_ptrack_stop_pg(self): node.pgbench_init(scale=1) # FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.stop() node.slow_start() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) # @unittest.skip("skip") @@ -187,16 +162,14 @@ def test_ptrack_multi_timeline_backup(self): t2 /------P2 t1 ------F---*-----P1 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -206,7 +179,7 @@ def test_ptrack_multi_timeline_backup(self): node.pgbench_init(scale=5) # FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) sleep(15) @@ -216,13 +189,12 @@ def test_ptrack_multi_timeline_backup(self): 'SELECT txid_current()').decode('utf-8').rstrip() pgbench.wait() - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + self.pb.backup_node('node', node, backup_type='ptrack') node.cleanup() # Restore from full backup to create Timeline 2 - print(self.restore_node( - backup_dir, 'node', node, + print(self.pb.restore_node('node', node, options=[ '--recovery-target-xid={0}'.format(xid), '--recovery-target-action=promote'])) @@ -232,13 +204,13 @@ def test_ptrack_multi_timeline_backup(self): pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - self.backup_node(backup_dir, 'node', node, 
backup_type='ptrack') + self.pb.backup_node('node', node, backup_type='ptrack') pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -264,16 +236,13 @@ def test_ptrack_multi_timeline_backup_1(self): t2 /------P2 t1 ---F--------* """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -283,15 +252,15 @@ def test_ptrack_multi_timeline_backup_1(self): node.pgbench_init(scale=5) # FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() - ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + ptrack_id = self.pb.backup_node('node', node, backup_type='ptrack') node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) node.slow_start() @@ -299,16 +268,16 @@ def test_ptrack_multi_timeline_backup_1(self): pgbench.wait() # delete old PTRACK backup - self.delete_pb(backup_dir, 'node', backup_id=ptrack_id) + self.pb.delete('node', backup_id=ptrack_id) # take new PTRACK backup - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + self.pb.backup_node('node', node, backup_type='ptrack') pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -330,16 +299,14 @@ def test_ptrack_eat_my_data(self): """ PGPRO-4051 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -348,10 +315,9 @@ def test_ptrack_eat_my_data(self): node.pgbench_init(scale=50) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum']) @@ -360,12 +326,12 @@ def test_ptrack_eat_my_data(self): sleep(2) - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') + self.pb.backup_node('node', node, backup_type='ptrack') # pgdata = self.pgdata_content(node.data_dir) # # node_restored.cleanup() # -# 
self.restore_node(backup_dir, 'node', node_restored) +# self.pb.restore_node('node', node=node_restored) # pgdata_restored = self.pgdata_content(node_restored.data_dir) # # self.compare_pgdata(pgdata, pgdata_restored) @@ -378,9 +344,8 @@ def test_ptrack_eat_my_data(self): result = node.table_checksum("pgbench_accounts") node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + self.pb.restore_node('node', node=node_restored) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() @@ -403,38 +368,34 @@ def test_ptrack_eat_my_data(self): def test_ptrack_simple(self): """make node, make full and ptrack stream backups," " restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( "postgres", "create table t_heap as select i" " as id from generate_series(0,1) i") - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) node.safe_psql( "postgres", "update t_heap set id = 100500") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) if self.paranoia: @@ -442,12 +403,10 @@ def test_ptrack_simple(self): result = node.table_checksum("t_heap") - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) # Physical comparison if self.paranoia: @@ -455,8 +414,7 @@ def test_ptrack_simple(self): node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() @@ -468,95 +426,22 @@ def test_ptrack_simple(self): # @unittest.skip("skip") def test_ptrack_unprivileged(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - # self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + # self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE 
ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + # PG < 15 + if self.pg_config_version < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -642,18 +527,16 @@ def test_ptrack_unprivileged(self): "backupdb", "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup") - if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, datname='backupdb', options=['--stream', "-U", "backup"]) - self.backup_node( - backup_dir, 'node', node, datname='backupdb', + self.pb.backup_node('node', node, datname='backupdb', backup_type='ptrack', options=['--stream', "-U", "backup"]) @@ -661,16 +544,14 @@ def test_ptrack_unprivileged(self): # @unittest.expectedFailure def test_ptrack_enable(self): """make ptrack without full backup, should result in error""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', + set_replication=True, pg_options={ 'checkpoint_timeout': '30s', 'shared_preload_libraries': 'ptrack'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -678,26 +559,10 @@ def test_ptrack_enable(self): "CREATE EXTENSION ptrack") # PTRACK BACKUP - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"] - ) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack disabled.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd - ) - ) - except ProbackupException as e: - self.assertIn( - 'ERROR: Ptrack is disabled\n', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd) - ) + self.pb.backup_node('node', node, backup_type='ptrack', + options=["--stream"], + expect_error="because ptrack disabled") + self.assertMessage(contains='ERROR: Ptrack is disabled') # @unittest.skip("skip") # @unittest.expectedFailure @@ -707,16 +572,14 @@ def test_ptrack_disable(self): enable ptrack, restart postgresql, take ptrack backup which should fail """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = 
self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -724,7 +587,7 @@ def test_ptrack_disable(self): "CREATE EXTENSION ptrack") # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # DISABLE PTRACK node.safe_psql('postgres', "alter system set ptrack.map_size to 0") @@ -738,77 +601,53 @@ def test_ptrack_disable(self): node.slow_start() # PTRACK BACKUP - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"] - ) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack_enable was set to OFF at some" - " point after previous backup.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd - ) - ) - except ProbackupException as e: - self.assertIn( - 'ERROR: LSN from ptrack_control', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd - ) - ) + self.pb.backup_node('node', node, backup_type='ptrack', + options=["--stream"], + expect_error="because ptrack_enable was set to OFF " + "at some point after previous backup") + self.assertMessage(contains='ERROR: LSN from ptrack_control') # @unittest.skip("skip") def test_ptrack_uncommitted_xact(self): """make ptrack backup while there is uncommitted open transaction""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'wal_level': 'replica'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) con = node.connect("postgres") con.execute( "create table t_heap as select i" " as id from generate_series(0,1) i") - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, - node_restored.data_dir, options=["-j", "4"]) + self.pb.restore_node('node', node_restored, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content( node_restored.data_dir, ignore_ptrack=False) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() @@ -817,20 +656,18 @@ def test_ptrack_uncommitted_xact(self): self.compare_pgdata(pgdata, pgdata_restored) # 
@unittest.skip("skip") - def test_ptrack_vacuum_full(self): + @needs_gdb + def test_ptrack_vacuum_full_1(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -839,7 +676,7 @@ def test_ptrack_vacuum_full(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( "postgres", @@ -858,33 +695,28 @@ def test_ptrack_vacuum_full(self): target=pg_connect.execute, args=["VACUUM FULL t_heap"]) process.start() - while not gdb.stopped_in_breakpoint: - sleep(1) + gdb.stopped_in_breakpoint() gdb.continue_execution_until_break(20) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) gdb.remove_all_breakpoints() - gdb._execute('detach') + gdb.detach() process.join() - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4", "-T", "{0}={1}".format( old_tablespace, new_tablespace)] ) @@ -895,8 +727,7 @@ def test_ptrack_vacuum_full(self): node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() @@ -906,15 +737,13 @@ def test_ptrack_vacuum_truncate(self): delete last 3 pages, vacuum relation, take ptrack backup, take second ptrack backup, restore last ptrack backup and check data correctness""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -935,7 +764,7 @@ def test_ptrack_vacuum_truncate(self): "postgres", "vacuum t_heap") - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) 
node.safe_psql( "postgres", @@ -945,24 +774,20 @@ def test_ptrack_vacuum_truncate(self): "postgres", "vacuum t_heap") - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new') - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=["-j", "4", "-T", "{0}={1}".format( old_tablespace, new_tablespace)] ) @@ -975,28 +800,25 @@ def test_ptrack_vacuum_truncate(self): ) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # @unittest.skip("skip") + @needs_gdb def test_ptrack_get_block(self): """ make node, make full and ptrack stream backups, restore them and check data correctness """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1008,9 +830,8 @@ def test_ptrack_get_block(self): "create table t_heap as select i" " as id from generate_series(0,1) i") - self.backup_node(backup_dir, 'node', node, options=['--stream']) - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, options=['--stream']) + gdb = self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream'], gdb=True) @@ -1023,8 +844,7 @@ def test_ptrack_get_block(self): gdb.continue_execution_until_exit() - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) if self.paranoia: @@ -1032,7 +852,7 @@ def test_ptrack_get_block(self): result = node.table_checksum("t_heap") node.cleanup() - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) # Physical comparison if self.paranoia: @@ -1050,17 +870,15 @@ def test_ptrack_get_block(self): def test_ptrack_stream(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1076,8 +894,7 @@ def test_ptrack_stream(self): " as tsvector from generate_series(0,100) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + full_backup_id = self.pb.backup_node('node', node, options=['--stream']) # PTRACK BACKUP node.safe_psql( @@ -1087,8 +904,7 @@ def test_ptrack_stream(self): " from generate_series(100,200) i") ptrack_result = node.table_checksum("t_heap") - ptrack_backup_id = self.backup_node( - backup_dir, 'node', node, + ptrack_backup_id = self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) if self.paranoia: @@ -1098,29 +914,20 @@ def test_ptrack_stream(self): node.cleanup() # Restore and check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, options=["-j", "4"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd) ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() # Restore and check ptrack backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(ptrack_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=ptrack_backup_id, options=["-j", "4"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(ptrack_backup_id)) if self.paranoia: pgdata_restored = self.pgdata_content( @@ -1135,18 +942,16 @@ def test_ptrack_stream(self): def test_ptrack_archive(self): """make archive node, make full and ptrack backups, check data correctness in restored instance""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1163,9 +968,8 @@ def test_ptrack_archive(self): " from generate_series(0,100) i") full_result = node.table_checksum("t_heap") - full_backup_id = self.backup_node(backup_dir, 'node', node) - full_target_time = self.show_pb( - backup_dir, 'node', full_backup_id)['recovery-time'] + full_backup_id = self.pb.backup_node('node', node) + full_target_time = self.pb.show('node', full_backup_id)['recovery-time'] # PTRACK BACKUP node.safe_psql( @@ -1176,10 +980,8 @@ def test_ptrack_archive(self): " from generate_series(100,200) i") ptrack_result = node.table_checksum("t_heap") - ptrack_backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack') - ptrack_target_time = self.show_pb( - backup_dir, 'node', ptrack_backup_id)['recovery-time'] + 
ptrack_backup_id = self.pb.backup_node('node', node, backup_type='ptrack') + ptrack_target_time = self.pb.show('node', ptrack_backup_id)['recovery-time'] if self.paranoia: pgdata = self.pgdata_content(node.data_dir) @@ -1194,18 +996,13 @@ def test_ptrack_archive(self): node.cleanup() # Check full backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(full_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=full_backup_id, options=[ "-j", "4", "--recovery-target-action=promote", - "--time={0}".format(full_target_time)] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd) - ) + "--recovery-target-time={0}".format(full_target_time)] + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(full_backup_id)) node.slow_start() full_result_new = node.table_checksum("t_heap") @@ -1213,19 +1010,14 @@ def test_ptrack_archive(self): node.cleanup() # Check ptrack backup - self.assertIn( - "INFO: Restore of backup {0} completed.".format(ptrack_backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, backup_id=ptrack_backup_id, options=[ "-j", "4", - "--time={0}".format(ptrack_target_time), + "--recovery-target-time={0}".format(ptrack_target_time), "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd) - ) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(ptrack_backup_id)) if self.paranoia: pgdata_restored = self.pgdata_content( @@ -1238,227 +1030,21 @@ def test_ptrack_archive(self): node.cleanup() - @unittest.skip("skip") - def test_ptrack_pgpro417(self): - """ - Make node, take full backup, take ptrack backup, - delete ptrack backup. Try to take ptrack backup, - which should fail. 
Actual only for PTRACK 1.x - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='full', options=["--stream"]) - - start_lsn_full = self.show_pb( - backup_dir, 'node', backup_id)['start-lsn'] - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - node.table_checksum("t_heap") - backup_id = self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - - start_lsn_ptrack = self.show_pb( - backup_dir, 'node', backup_id)['start-lsn'] - - self.delete_pb(backup_dir, 'node', backup_id) - - # SECOND PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup start_lsn.\n" - " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") - def test_page_pgpro417(self): - """ - Make archive node, take full backup, take page backup, - delete page backup. Try to take ptrack backup, which should fail. 
- Actual only for PTRACK 1.x - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.table_checksum("t_heap") - - # PAGE BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(100,200) i") - node.table_checksum("t_heap") - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - self.delete_pb(backup_dir, 'node', backup_id) -# sys.exit(1) - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - - try: - self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup start_lsn.\n " - "Output: {0}\n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - @unittest.skip("skip") - def test_full_pgpro417(self): - """ - Make node, take two full backups, delete full second backup. - Try to take ptrack backup, which should fail. 
- Relevant only for PTRACK 1.x - """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'checkpoint_timeout': '30s'}) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # FULL BACKUP - node.safe_psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text," - " md5(i::text)::tsvector as tsvector " - " from generate_series(0,100) i" - ) - node.table_checksum("t_heap") - self.backup_node(backup_dir, 'node', node, options=["--stream"]) - - # SECOND FULL BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text," - " md5(i::text)::tsvector as tsvector" - " from generate_series(100,200) i" - ) - node.table_checksum("t_heap") - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - - self.delete_pb(backup_dir, 'node', backup_id) - - # PTRACK BACKUP - node.safe_psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(i::text)::tsvector as tsvector " - "from generate_series(200,300) i") - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=["--stream"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of LSN mismatch from ptrack_control " - "and previous backup start_lsn.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertTrue( - "ERROR: LSN from ptrack_control" in e.message and - "Create new full backup before " - "an incremental one" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - # @unittest.skip("skip") def test_create_db(self): """ Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'max_wal_size': '10GB'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1472,8 +1058,7 @@ def test_create_db(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") node.table_checksum("t_heap") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=["--stream"]) # CREATE DATABASE DB1 @@ -1484,20 +1069,17 @@ def test_create_db(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") # PTRACK BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', 
node_restored, backup_id=backup_id, options=["-j", "4"]) # COMPARE PHYSICAL CONTENT @@ -1507,16 +1089,14 @@ def test_create_db(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # DROP DATABASE DB1 node.safe_psql( "postgres", "drop database db1") # SECOND PTRACK BACKUP - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"] ) @@ -1525,8 +1105,7 @@ def test_create_db(self): # RESTORE SECOND PTRACK BACKUP node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, backup_id=backup_id, options=["-j", "4"]) # COMPARE PHYSICAL CONTENT @@ -1536,23 +1115,12 @@ def test_create_db(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() - try: - node_restored.safe_psql('db1', 'select 1') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are connecting to deleted database" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except QueryException as e: - self.assertTrue( - 'FATAL: database "db1" does not exist' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = node_restored.safe_psql('db1', 'select 1', expect_error=True) + + self.assertMessage(error_result, contains='FATAL: database "db1" does not exist') # @unittest.skip("skip") def test_create_db_on_replica(self): @@ -1562,17 +1130,15 @@ def test_create_db_on_replica(self): create database db1, take ptrack backup from replica, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1585,29 +1151,20 @@ def test_create_db_on_replica(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) # Add replica - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(node, replica, 'replica', synchronous=True) replica.slow_start(replica=True) - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port), - '--stream' - ] + 
self.pb.backup_node('replica', replica, + options=['-j10', '--stream'] ) # CREATE DATABASE DB1 @@ -1622,28 +1179,19 @@ def test_create_db_on_replica(self): replica.safe_psql('postgres', 'checkpoint') # PTRACK BACKUP - backup_id = self.backup_node( - backup_dir, 'replica', + backup_id = self.pb.backup_node('replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port) - ] + options=['-j10', '--stream'] ) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'replica', node_restored, + self.pb.restore_node('replica', node_restored, backup_id=backup_id, options=["-j", "4"]) # COMPARE PHYSICAL CONTENT @@ -1656,17 +1204,15 @@ def test_create_db_on_replica(self): def test_alter_table_set_tablespace_ptrack(self): """Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1681,7 +1227,7 @@ def test_alter_table_set_tablespace_ptrack(self): " md5(i::text) as text, md5(i::text)::tsvector as tsvector" " from generate_series(0,100) i") # FULL backup - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) # ALTER TABLESPACE self.create_tblspace_in_node(node, 'somedata_new') @@ -1692,8 +1238,7 @@ def test_alter_table_set_tablespace_ptrack(self): # sys.exit(1) # PTRACK BACKUP #result = node.table_checksum("t_heap") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"] ) @@ -1703,12 +1248,10 @@ def test_alter_table_set_tablespace_ptrack(self): # node.cleanup() # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, options=[ "-j", "4", "-T", "{0}={1}".format( @@ -1729,8 +1272,7 @@ def test_alter_table_set_tablespace_ptrack(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() # result_new = node_restored.table_checksum("t_heap") @@ -1742,17 +1284,15 @@ def test_alter_database_set_tablespace_ptrack(self): """Make node, create tablespace with database," " take full backup, alter tablespace location," " take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1760,7 +1300,7 @@ def test_alter_database_set_tablespace_ptrack(self): "CREATE EXTENSION ptrack") # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) # CREATE TABLESPACE self.create_tblspace_in_node(node, 'somedata') @@ -1771,8 +1311,7 @@ def test_alter_database_set_tablespace_ptrack(self): "alter database postgres set tablespace somedata") # PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) if self.paranoia: @@ -1780,11 +1319,9 @@ def test_alter_database_set_tablespace_ptrack(self): node.stop() # RESTORE - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored, options=[ "-j", "4", @@ -1808,17 +1345,15 @@ def test_drop_tablespace(self): Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1835,30 +1370,27 @@ def test_drop_tablespace(self): result = node.table_checksum("t_heap") # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) # Move table to tablespace 'somedata' node.safe_psql( "postgres", "alter table t_heap set tablespace somedata") # PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) # Move table back to default tablespace node.safe_psql( "postgres", "alter table t_heap set tablespace pg_default") # SECOND PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) # DROP TABLESPACE 'somedata' node.safe_psql( "postgres", "drop tablespace somedata") # THIRD PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) if self.paranoia: @@ -1868,7 +1400,7 @@ def test_drop_tablespace(self): tblspace = self.get_tblspace_path(node, 'somedata') node.cleanup() shutil.rmtree(tblspace, ignore_errors=True) - self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) + self.pb.restore_node('node', node=node, options=["-j", "4"]) if self.paranoia: pgdata_restored = self.pgdata_content( @@ -1899,17 +1431,15 @@ def 
test_ptrack_alter_tablespace(self): Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -1927,7 +1457,7 @@ def test_ptrack_alter_tablespace(self): result = node.table_checksum("t_heap") # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) # Move table to separate tablespace node.safe_psql( @@ -1937,8 +1467,7 @@ def test_ptrack_alter_tablespace(self): result = node.table_checksum("t_heap") # FIRTS PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) # GET PHYSICAL CONTENT FROM NODE @@ -1946,12 +1475,11 @@ def test_ptrack_alter_tablespace(self): pgdata = self.pgdata_content(node.data_dir) # Restore ptrack backup - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) + restored_node = self.pg_node.make_simple('restored_node') restored_node.cleanup() tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') - self.restore_node(backup_dir, 'node', restored_node, options=[ + self.pb.restore_node('node', node=restored_node, options=[ "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT @@ -1961,8 +1489,7 @@ def test_ptrack_alter_tablespace(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf( - restored_node, {'port': restored_node.port}) + restored_node.set_auto_conf({'port': restored_node.port}) restored_node.slow_start() # COMPARE LOGICAL CONTENT @@ -1976,16 +1503,14 @@ def test_ptrack_alter_tablespace(self): node.safe_psql( "postgres", "alter table t_heap set tablespace pg_default") # SECOND PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + self.pb.backup_node('node', node, backup_type='ptrack', options=["--stream"]) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) # Restore second ptrack backup and check table consistency - self.restore_node( - backup_dir, 'node', restored_node, + self.pb.restore_node('node', restored_node, options=[ "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)]) @@ -1996,8 +1521,7 @@ def test_ptrack_alter_tablespace(self): self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE - self.set_auto_conf( - restored_node, {'port': restored_node.port}) + restored_node.set_auto_conf({'port': restored_node.port}) restored_node.slow_start() result_new = restored_node.table_checksum("t_heap") @@ -2009,17 +1533,15 @@ def test_ptrack_multiple_segments(self): Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'full_page_writes': 'off'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -2032,7 +1554,7 @@ def test_ptrack_multiple_segments(self): node.pgbench_init(scale=100, options=['--tablespace=somedata']) result = node.table_checksum("pgbench_accounts") # FULL BACKUP - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # PTRACK STUFF if node.major_version < 11: @@ -2069,23 +1591,20 @@ def test_ptrack_multiple_segments(self): # it`s stupid, because hint`s are ignored by ptrack result = node.table_checksum("pgbench_accounts") # FIRTS PTRACK BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) # GET PHYSICAL CONTENT FROM NODE pgdata = self.pgdata_content(node.data_dir) # RESTORE NODE - restored_node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) + restored_node = self.pg_node.make_simple('restored_node') restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') - self.restore_node( - backup_dir, 'node', restored_node, + self.pb.restore_node('node', restored_node, options=[ "-j", "4", "-T", "{0}={1}".format( tblspc_path, tblspc_path_new)]) @@ -2096,8 +1615,7 @@ def test_ptrack_multiple_segments(self): restored_node.data_dir, ignore_ptrack=False) # START RESTORED NODE - self.set_auto_conf( - restored_node, {'port': restored_node.port}) + restored_node.set_auto_conf({'port': restored_node.port}) restored_node.slow_start() result_new = restored_node.table_checksum("pgbench_accounts") @@ -2108,307 +1626,16 @@ def test_ptrack_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - @unittest.skip("skip") - def test_atexit_fail(self): - """ - Take backups of every available types and check that PTRACK is clean. 
- Relevant only for PTRACK 1.x - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'max_connections': '15'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - try: - self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', - options=["--stream", "-j 30"]) - - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because we are opening too many connections" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertIn( - 'setting its status to ERROR', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd) - ) - - self.assertEqual( - node.safe_psql( - "postgres", - "select * from pg_is_in_backup()").rstrip(), - "f") - - @unittest.skip("skip") - # @unittest.expectedFailure - def test_ptrack_clean(self): - """ - Take backups of every available types and check that PTRACK is clean - Relevant only for PTRACK 1.x - """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - node.slow_start() - - self.create_tblspace_in_node(node, 'somedata') - - # Create table and indexes - node.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap tablespace somedata " - "as select i as id, nextval('t_seq') as t_seq, " - "md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - node.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3}) " - "tablespace somedata".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, - options=['-j10', '--stream']) - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(node, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - node.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), " - "text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - node.safe_psql('postgres', 'vacuum t_heap') - - # Take PTRACK backup to clean every ptrack - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', options=['-j10', '--stream']) - - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in 
pages - idx_ptrack[i]['size'] = self.get_fork_size(node, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - node.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), " - "text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - node.safe_psql('postgres', 'vacuum t_heap') - - # Take PAGE backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['-j10', '--stream']) - node.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(node, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(node, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - @unittest.skip("skip") - def test_ptrack_clean_replica(self): - """ - Take backups of every available types from - master and check that PTRACK on replica is clean. - Relevant only for PTRACK 1.x - """ - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'archive_timeout': '30s'}) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - master.slow_start() - - self.backup_node(backup_dir, 'master', master, options=['--stream']) - - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) - replica.cleanup() - - self.restore_node(backup_dir, 'master', replica) - - self.add_instance(backup_dir, 'replica', replica) - self.set_replica(master, replica, synchronous=True) - replica.slow_start(replica=True) - - # Create table and indexes - master.safe_psql( - "postgres", - "create extension bloom; create sequence t_seq; " - "create table t_heap as select i as id, " - "nextval('t_seq') as t_seq, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") - for i in idx_ptrack: - if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq': - master.safe_psql( - "postgres", - "create index {0} on {1} using {2}({3})".format( - i, idx_ptrack[i]['relation'], - idx_ptrack[i]['type'], - idx_ptrack[i]['column'])) - - # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, - 'replica', - replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - master.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get fork size and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(replica, i) - # get path to heap and index files - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # get ptrack for every idx - idx_ptrack[i]['ptrack'] = 
self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - master.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), " - "text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - master.safe_psql('postgres', 'vacuum t_heap') - - # Take PTRACK backup to clean every ptrack - backup_id = self.backup_node( - backup_dir, - 'replica', - replica, - backup_type='ptrack', - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - master.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(replica, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - - # Update everything and vacuum it - master.safe_psql( - 'postgres', - "update t_heap set t_seq = nextval('t_seq'), text = md5(text), " - "tsvector = md5(repeat(tsvector::text, 10))::tsvector;") - master.safe_psql('postgres', 'vacuum t_heap') - master.safe_psql('postgres', 'checkpoint') - - # Take PAGE backup to clean every ptrack - self.backup_node( - backup_dir, - 'replica', - replica, - backup_type='page', - options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) - master.safe_psql('postgres', 'checkpoint') - - for i in idx_ptrack: - # get new size of heap and indexes and calculate it in pages - idx_ptrack[i]['size'] = self.get_fork_size(replica, i) - # update path to heap and index files in case they`ve changed - idx_ptrack[i]['path'] = self.get_fork_path(replica, i) - # # get ptrack for every idx - idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork( - replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']]) - # check that ptrack bits are cleaned - self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_cluster_on_btree(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -2447,8 +1674,7 @@ def test_ptrack_cluster_on_btree(self): idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') node.safe_psql('postgres', 'cluster t_heap using t_btree') @@ -2460,15 +1686,13 @@ def test_ptrack_cluster_on_btree(self): # @unittest.skip("skip") def 
test_ptrack_cluster_on_gist(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -2503,8 +1727,7 @@ def test_ptrack_cluster_on_gist(self): idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') node.safe_psql('postgres', 'cluster t_heap using t_gist') @@ -2514,29 +1737,26 @@ def test_ptrack_cluster_on_gist(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") def test_ptrack_cluster_on_btree_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -2544,15 +1764,14 @@ def test_ptrack_cluster_on_btree_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) replica.slow_start(replica=True) @@ -2577,11 +1796,7 @@ def test_ptrack_cluster_on_btree_replica(self): master.safe_psql('postgres', 'vacuum t_heap') master.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + self.pb.backup_node('replica', replica, options=['-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. 
size calculated in pages @@ -2604,31 +1819,28 @@ def test_ptrack_cluster_on_btree_replica(self): if master.major_version < 11: self.check_ptrack_map_sanity(replica, idx_ptrack) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', node) + self.pb.restore_node('replica', node=node) pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") def test_ptrack_cluster_on_gist_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -2636,15 +1848,14 @@ def test_ptrack_cluster_on_gist_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, 'replica', synchronous=True) replica.slow_start(replica=True) @@ -2673,11 +1884,8 @@ def test_ptrack_cluster_on_gist_replica(self): self.wait_until_replica_catch_with_master(master, replica) replica.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + self.pb.backup_node('replica', replica, options=[ + '-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. 
size calculated in pages @@ -2701,18 +1909,16 @@ def test_ptrack_cluster_on_gist_replica(self): replica.safe_psql('postgres', 'CHECKPOINT') self.check_ptrack_map_sanity(replica, idx_ptrack) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='ptrack', options=['-j10', '--stream']) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', node) + self.pb.restore_node('replica', node) if self.paranoia: pgdata_restored = self.pgdata_content(replica.data_dir) @@ -2722,15 +1928,13 @@ def test_ptrack_cluster_on_gist_replica(self): # @unittest.expectedFailure def test_ptrack_empty(self): """Take backups of every available types and check that PTRACK is clean""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -2748,8 +1952,7 @@ def test_ptrack_empty(self): "tablespace somedata") # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['-j10', '--stream']) # Create indexes @@ -2765,23 +1968,20 @@ def test_ptrack_empty(self): node.safe_psql('postgres', 'checkpoint') - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() tblspace1 = self.get_tblspace_path(node, 'somedata') tblspace2 = self.get_tblspace_path(node_restored, 'somedata') # Take PTRACK backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='ptrack', + backup_id = self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - self.restore_node( - backup_dir, 'node', node_restored, + self.pb.restore_node('node', node_restored, backup_id=backup_id, options=[ "-j", "4", @@ -2798,15 +1998,13 @@ def test_ptrack_empty_replica(self): Take backups of every available types from master and check that PTRACK on replica is clean """ - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -2814,15 +2012,14 @@ def test_ptrack_empty_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = 
self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) replica.slow_start(replica=True) @@ -2835,15 +2032,10 @@ def test_ptrack_empty_replica(self): self.wait_until_replica_catch_with_master(master, replica) # Take FULL backup - self.backup_node( - backup_dir, + self.pb.backup_node( 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) # Create indexes for i in idx_ptrack: @@ -2858,26 +2050,19 @@ def test_ptrack_empty_replica(self): self.wait_until_replica_catch_with_master(master, replica) # Take PTRACK backup - backup_id = self.backup_node( - backup_dir, + backup_id = self.pb.backup_node( 'replica', replica, backup_type='ptrack', - options=[ - '-j1', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j1', '--stream']) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'replica', node_restored, + self.pb.restore_node('replica', node_restored, backup_id=backup_id, options=["-j", "4"]) if self.paranoia: @@ -2887,15 +2072,13 @@ def test_ptrack_empty_replica(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_truncate(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -2923,8 +2106,7 @@ def test_ptrack_truncate(self): i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql('postgres', 'truncate t_heap') node.safe_psql('postgres', 'checkpoint') @@ -2940,8 +2122,7 @@ def test_ptrack_truncate(self): idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) # Make backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(node.data_dir) @@ -2957,26 +2138,24 @@ def test_ptrack_truncate(self): self.get_tblspace_path(node, 'somedata'), ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") def test_basic_ptrack_truncate_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 
'max_wal_size': '32MB', 'archive_timeout': '10s', 'checkpoint_timeout': '5min'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -2984,15 +2163,14 @@ def test_basic_ptrack_truncate_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, 'replica', synchronous=True) replica.slow_start(replica=True) @@ -3028,14 +2206,8 @@ def test_basic_ptrack_truncate_replica(self): idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) # Make backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + self.pb.backup_node('replica', replica, + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3057,29 +2229,22 @@ def test_basic_ptrack_truncate_replica(self): "postgres", "select pg_wal_replay_pause()") - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + self.pb.backup_node('replica', replica, backup_type='ptrack', + options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) + self.pb.restore_node('replica', node) pgdata_restored = self.pgdata_content(node.data_dir) if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() @@ -3090,15 +2255,13 @@ def test_basic_ptrack_truncate_replica(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -3131,8 +2294,7 @@ def test_ptrack_vacuum(self): node.safe_psql('postgres', 'checkpoint') # Make full backup to clean every ptrack - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) if node.major_version < 11: for i in idx_ptrack: @@ -3156,8 +2318,7 @@ def test_ptrack_vacuum(self): 
if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(node.data_dir) @@ -3167,24 +2328,22 @@ def test_ptrack_vacuum(self): self.get_tblspace_path(node, 'somedata'), ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) # @unittest.skip("skip") def test_ptrack_vacuum_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -3192,15 +2351,14 @@ def test_ptrack_vacuum_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, 'replica', synchronous=True) replica.slow_start(replica=True) @@ -3228,12 +2386,7 @@ def test_ptrack_vacuum_replica(self): replica.safe_psql('postgres', 'checkpoint') # Make FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.pb.backup_node('replica', replica, options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3261,17 +2414,15 @@ def test_ptrack_vacuum_replica(self): if replica.major_version < 11: self.check_ptrack_map_sanity(master, idx_ptrack) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) + self.pb.restore_node('replica', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3279,15 +2430,13 @@ def test_ptrack_vacuum_replica(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_frozen(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - 
self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -3317,8 +2466,7 @@ def test_ptrack_vacuum_bits_frozen(self): comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) node.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'vacuum freeze t_heap') node.safe_psql('postgres', 'checkpoint') @@ -3337,8 +2485,7 @@ def test_ptrack_vacuum_bits_frozen(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(node.data_dir) @@ -3347,22 +2494,20 @@ def test_ptrack_vacuum_bits_frozen(self): self.get_tblspace_path(node, 'somedata'), ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) # @unittest.skip("skip") def test_ptrack_vacuum_bits_frozen_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -3370,15 +2515,14 @@ def test_ptrack_vacuum_bits_frozen_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) replica.slow_start(replica=True) @@ -3405,14 +2549,8 @@ def test_ptrack_vacuum_bits_frozen_replica(self): replica.safe_psql('postgres', 'checkpoint') # Take backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.pb.backup_node('replica', replica, + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3435,14 +2573,13 @@ def test_ptrack_vacuum_bits_frozen_replica(self): if replica.major_version < 11: self.check_ptrack_map_sanity(master, idx_ptrack) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', + self.pb.backup_node('replica', replica, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) replica.cleanup() - self.restore_node(backup_dir, 'replica', replica) + self.pb.restore_node('replica', node=replica) pgdata_restored = 
self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3450,15 +2587,13 @@ def test_ptrack_vacuum_bits_frozen_replica(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_visibility(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -3488,8 +2623,7 @@ def test_ptrack_vacuum_bits_visibility(self): comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node) node.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) if node.major_version < 11: for i in idx_ptrack: @@ -3508,8 +2642,7 @@ def test_ptrack_vacuum_bits_visibility(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(node.data_dir) @@ -3518,7 +2651,7 @@ def test_ptrack_vacuum_bits_visibility(self): self.get_tblspace_path(node, 'somedata'), ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) @@ -3526,15 +2659,14 @@ def test_ptrack_vacuum_bits_visibility(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full_2(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, pg_options={ 'wal_log_hints': 'on' }) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -3562,8 +2694,7 @@ def test_ptrack_vacuum_full_2(self): node.safe_psql('postgres', 'vacuum t_heap') node.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) if node.major_version < 11: for i in idx_ptrack: @@ -3582,8 +2713,7 @@ def test_ptrack_vacuum_full_2(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(node.data_dir) @@ -3593,7 +2723,7 @@ def test_ptrack_vacuum_full_2(self): self.get_tblspace_path(node, 'somedata'), ignore_errors=True) - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3601,15 +2731,13 @@ def test_ptrack_vacuum_full_2(self): # @unittest.skip("skip") # @unittest.expectedFailure def 
test_ptrack_vacuum_full_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -3617,14 +2745,13 @@ def test_ptrack_vacuum_full_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + self.pb.backup_node('master', master, options=['--stream']) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, 'replica', synchronous=True) replica.slow_start(replica=True) @@ -3654,14 +2781,8 @@ def test_ptrack_vacuum_full_replica(self): replica.safe_psql('postgres', 'checkpoint') # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.pb.backup_node('replica', replica, + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3684,14 +2805,13 @@ def test_ptrack_vacuum_full_replica(self): if replica.major_version < 11: self.check_ptrack_map_sanity(master, idx_ptrack) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='ptrack', options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) replica.cleanup() - self.restore_node(backup_dir, 'replica', replica) + self.pb.restore_node('replica', node=replica) pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3699,15 +2819,13 @@ def test_ptrack_vacuum_full_replica(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_2(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -3733,8 +2851,7 @@ def test_ptrack_vacuum_truncate_2(self): node.safe_psql('postgres', 'VACUUM t_heap') - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.pb.backup_node('node', node, options=['-j10', '--stream']) if node.major_version < 11: for i in idx_ptrack: @@ -3754,17 +2871,15 @@ def test_ptrack_vacuum_truncate_2(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) 
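
For reference, a minimal sketch of the calling convention these hunks converge on: the backup catalog and node factory live on the test fixture (self.pb and self.pg_node), so individual tests no longer pass backup_dir or initdb_params around, and replica backups drop the --master-host/--master-db/--master-port options. The ProbackupTest base class and its import path are assumptions here; only the self.pb.* and self.pg_node.* calls themselves are taken from the added lines of this patch.

from .helpers.ptrack_helpers import ProbackupTest  # assumed import, matching the existing ptrack test module


class PtrackRefactorSketch(ProbackupTest):  # hypothetical class, for illustration only

    def test_full_then_ptrack_sketch(self):
        # New style: the node factory is owned by the fixture; no base_dir or
        # initdb_params=['--data-checksums'] arguments.
        node = self.pg_node.make_simple('node',
                                        set_replication=True,
                                        ptrack_enable=True)

        # Old style (removed lines) passed backup_dir to every helper:
        #   self.init_pb(backup_dir); self.add_instance(backup_dir, 'node', node)
        self.pb.init()
        self.pb.add_instance('node', node)
        node.slow_start()

        # FULL, then PTRACK; the replica variants in the hunks above use the
        # same form with only ['-j10', '--stream'] as options.
        self.pb.backup_node('node', node, options=['--stream'])
        backup_id = self.pb.backup_node('node', node,
                                        backup_type='ptrack', options=['--stream'])

        # Restore into a fresh node and point it at its own port.
        node_restored = self.pg_node.make_simple('node_restored')
        node_restored.cleanup()
        self.pb.restore_node('node', node_restored,
                             backup_id=backup_id, options=['-j', '4'])
        node_restored.set_auto_conf({'port': node_restored.port})
        node_restored.slow_start()
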
pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3772,15 +2887,13 @@ def test_ptrack_vacuum_truncate_2(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_replica(self): - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + master = self.pg_node.make_simple('master', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() if master.major_version >= 11: @@ -3788,15 +2901,14 @@ def test_ptrack_vacuum_truncate_replica(self): "postgres", "CREATE EXTENSION ptrack") - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, 'replica', synchronous=True) replica.slow_start(replica=True) @@ -3820,15 +2932,8 @@ def test_ptrack_vacuum_truncate_replica(self): master.safe_psql('postgres', 'checkpoint') # Take FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port) - ] + self.pb.backup_node('replica', replica, + options=['-j10', '--stream'] ) if master.major_version < 11: @@ -3853,8 +2958,7 @@ def test_ptrack_vacuum_truncate_replica(self): if master.major_version < 11: self.check_ptrack_map_sanity(master, idx_ptrack) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', + self.pb.backup_node('replica', replica, backup_type='ptrack', options=[ '--stream', '--log-level-file=INFO', @@ -3862,11 +2966,10 @@ def test_ptrack_vacuum_truncate_replica(self): pgdata = self.pgdata_content(replica.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'replica', node_restored) + self.pb.restore_node('replica', node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3877,15 +2980,13 @@ def test_ptrack_recovery(self): Check that ptrack map contain correct bits after recovery. 
Actual only for PTRACK 1.x """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'somedata') @@ -3932,18 +3033,16 @@ def test_ptrack_recovery(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_recovery_1(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -3960,8 +3059,7 @@ def test_ptrack_recovery_1(self): # "from generate_series(0,25600) i") "from generate_series(0,2560) i") - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # Create indexes for i in idx_ptrack: @@ -3995,18 +3093,15 @@ def test_ptrack_recovery_1(self): print("Die! Die! Why won't you die?... Why won't you die?") exit(1) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -4014,15 +3109,13 @@ def test_ptrack_recovery_1(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_zero_changes(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -4037,17 +3130,15 @@ def test_ptrack_zero_changes(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,2560) i") - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) 
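# Editor's sketch, not part of the patch: node construction in these tests
# drops both base_dir=os.path.join(...) and initdb_params=['--data-checksums']
# (presumably the pg_probackup2 framework derives the directory from the test
# name and applies its own initdb defaults), while pg_options still passes
# through unchanged. Keyword names below are the ones used in the hunks above.
node = self.pg_node.make_simple(
    'node',
    set_replication=True,
    ptrack_enable=True,
    pg_options={'shared_buffers': '512MB',
                'max_wal_size': '3GB'})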
self.compare_pgdata(pgdata, pgdata_restored) @@ -4055,18 +3146,16 @@ def test_ptrack_zero_changes(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_pg_resetxlog(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=True, - initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -4083,8 +3172,7 @@ def test_ptrack_pg_resetxlog(self): # "from generate_series(0,25600) i") "from generate_series(0,2560) i") - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # Create indexes for i in idx_ptrack: @@ -4121,7 +3209,7 @@ def test_ptrack_pg_resetxlog(self): pg_resetxlog_path = self.get_bin_path('pg_resetxlog') wal_dir = 'pg_xlog' - self.run_binary( + self.pb.run_binary( [ pg_resetxlog_path, '-D', @@ -4138,36 +3226,23 @@ def test_ptrack_pg_resetxlog(self): exit(1) # take ptrack backup -# self.backup_node( -# backup_dir, 'node', node, +# self.pb.backup_node( +# 'node', node, # backup_type='ptrack', options=['--stream']) - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance was brutalized by pg_resetxlog" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd) - ) - except ProbackupException as e: - self.assertTrue( - 'ERROR: LSN from ptrack_control ' in e.message and - 'is greater than Start LSN of previous backup' in e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='ptrack', + options=['--stream'], + expect_error="because instance was brutalized by pg_resetxlog") + self.assertMessage(regex='ERROR: LSN from ptrack_control .* ' + 'is greater than Start LSN of previous backup') # pgdata = self.pgdata_content(node.data_dir) # -# node_restored = self.make_simple_node( -# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) +# node_restored = self.pg_node.make_simple('node_restored') # node_restored.cleanup() # -# self.restore_node( -# backup_dir, 'node', node_restored) +# self.pb.restore_node( +# 'node', node_restored) # # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) @@ -4175,15 +3250,13 @@ def test_ptrack_pg_resetxlog(self): # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_ptrack_map(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -4201,8 +3274,7 @@ def test_corrupt_ptrack_map(self): 
"md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,2560) i") - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.safe_psql( 'postgres', @@ -4264,25 +3336,13 @@ def test_corrupt_ptrack_map(self): 'FATAL: ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map), log_content) - self.set_auto_conf(node, {'ptrack.map_size': '0'}) + node.set_auto_conf({'ptrack.map_size': '0'}) node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance ptrack is disabled" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Ptrack is disabled', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='ptrack', + options=['--stream'], + expect_error="because instance ptrack is disabled") + self.assertMessage(contains='ERROR: Ptrack is disabled') node.safe_psql( 'postgres', @@ -4291,28 +3351,15 @@ def test_corrupt_ptrack_map(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) - self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'}) + node.set_auto_conf({'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'}) node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='ptrack', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because ptrack map is from future" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: LSN from ptrack_control', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='ptrack', + options=['--stream'], + expect_error="because ptrack map is from future") + self.assertMessage(contains='ERROR: LSN from ptrack_control') - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) node.safe_psql( @@ -4320,15 +3367,14 @@ def test_corrupt_ptrack_map(self): "update t_heap set id = nextval('t_seq'), text = md5(text), " "tsvector = md5(repeat(tsvector::text, 10))::tsvector") - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -4346,15 +3392,13 @@ def test_horizon_lsn_ptrack(self): self.version_to_num('2.4.15'), 'You need pg_probackup old_binary =< 2.4.15 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + 
self.pb.add_instance('node', node) node.slow_start() node.safe_psql( @@ -4367,16 +3411,16 @@ def test_horizon_lsn_ptrack(self): "You need ptrack >=2.1 for this test") # set map_size to a minimal value - self.set_auto_conf(node, {'ptrack.map_size': '1'}) + node.set_auto_conf({'ptrack.map_size': '1'}) node.restart() node.pgbench_init(scale=100) # FULL backup - full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True) + full_id = self.pb.backup_node('node', node, options=['--stream'], old_binary=True) # enable archiving so the WAL size to do interfere with data bytes comparison later - self.set_archiving(backup_dir, 'node', node) + self.pb.set_archiving('node', node) node.restart() # change data @@ -4384,14 +3428,13 @@ def test_horizon_lsn_ptrack(self): pgbench.wait() # DELTA is exemplar - delta_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') - delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"] - self.delete_pb(backup_dir, 'node', backup_id=delta_id) + delta_id = self.pb.backup_node('node', node, backup_type='delta') + delta_bytes = self.pb.show('node', backup_id=delta_id)["data-bytes"] + self.pb.delete('node', backup_id=delta_id) # PTRACK with current binary - ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack') - ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"] + ptrack_id = self.pb.backup_node('node', node, backup_type='ptrack') + ptrack_bytes = self.pb.show('node', backup_id=ptrack_id)["data-bytes"] # make sure that backup size is exactly the same self.assertEqual(delta_bytes, ptrack_bytes) diff --git a/tests/remote_test.py b/tests/remote_test.py index 2d36d7346..519380b3d 100644 --- a/tests/remote_test.py +++ b/tests/remote_test.py @@ -1,43 +1,19 @@ -import unittest -import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.cfs_helpers import find_by_name +from .helpers.ptrack_helpers import ProbackupTest -class RemoteTest(ProbackupTest, unittest.TestCase): +class RemoteTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure def test_remote_sanity(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - output = self.backup_node( - backup_dir, 'node', node, + output = self.pb.backup_node('node', node, options=['--stream'], no_remote=True, return_id=False) self.assertIn('remote: false', output) - - # try: - # self.backup_node( - # backup_dir, 'node', - # node, options=['--remote-proto=ssh', '--stream'], no_remote=True) - # # we should die here because exception is what we expect to happen - # self.assertEqual( - # 1, 0, - # "Expecting Error because remote-host option is missing." 
- # "\n Output: {0} \n CMD: {1}".format( - # repr(self.output), self.cmd)) - # except ProbackupException as e: - # self.assertIn( - # "Insert correct error", - # e.message, - # "\n Unexpected Error Message: {0}\n CMD: {1}".format( - # repr(e.message), self.cmd)) diff --git a/tests/replica_test.py b/tests/replica_test.py index 17fc5a823..7ff539b34 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -1,15 +1,12 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -import subprocess -import time -from distutils.dir_util import copy_tree +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb from testgres import ProcessType from time import sleep -class ReplicaTest(ProbackupTest, unittest.TestCase): +class ReplicaTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure @@ -19,35 +16,28 @@ def test_replica_switchover(self): over the course of several switchovers https://www.postgresql.org/message-id/54b059d4-2b48-13a4-6f43-95a087c92367%40postgrespro.ru """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) - - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') + backup_dir = self.backup_dir + node1 = self.pg_node.make_simple('node1', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node1', node1) + self.pb.init() + self.pb.add_instance('node1', node1) node1.slow_start() # take full backup and restore it - self.backup_node(backup_dir, 'node1', node1, options=['--stream']) - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + self.pb.backup_node('node1', node1, options=['--stream']) + node2 = self.pg_node.make_simple('node2') node2.cleanup() # create replica - self.restore_node(backup_dir, 'node1', node2) + self.pb.restore_node('node1', node=node2) # setup replica - self.add_instance(backup_dir, 'node2', node2) - self.set_archiving(backup_dir, 'node2', node2, replica=True) + self.pb.add_instance('node2', node2) + self.pb.set_archiving('node2', node2, replica=True) self.set_replica(node1, node2, synchronous=False) - self.set_auto_conf(node2, {'port': node2.port}) + node2.set_auto_conf({'port': node2.port}) node2.slow_start(replica=True) @@ -55,7 +45,7 @@ def test_replica_switchover(self): node1.pgbench_init(scale=5) # take full backup on replica - self.backup_node(backup_dir, 'node2', node2, options=['--stream']) + self.pb.backup_node('node2', node2, options=['--stream']) # first switchover node1.stop() @@ -66,8 +56,7 @@ def test_replica_switchover(self): node1.slow_start(replica=True) # take incremental backup from new master - self.backup_node( - backup_dir, 'node2', node2, + self.pb.backup_node('node2', node2, backup_type='delta', options=['--stream']) # second switchover @@ -81,12 +70,11 @@ def test_replica_switchover(self): node1.pgbench_init(scale=5) # take incremental backup from replica - self.backup_node( - backup_dir, 'node2', node2, + self.pb.backup_node('node2', node2, backup_type='delta', options=['--stream']) # https://github.com/postgrespro/pg_probackup/issues/251 - self.validate_pb(backup_dir) + self.pb.validate() # @unittest.skip("skip") # 
@unittest.expectedFailure @@ -98,26 +86,19 @@ def test_replica_stream_ptrack_backup(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - if self.pg_config_version > self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() - if master.major_version >= 12: - master.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") + master.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") # CREATE TABLE master.psql( @@ -128,11 +109,10 @@ def test_replica_stream_ptrack_backup(self): before = master.table_checksum("t_heap") # take full backup and restore it - self.backup_node(backup_dir, 'master', master, options=['--stream']) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + self.pb.backup_node('master', master, options=['--stream']) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) self.set_replica(master, replica) # Check data correctness on replica @@ -149,26 +129,20 @@ def test_replica_stream_ptrack_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") before = master.table_checksum("t_heap") - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) - backup_id = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - self.validate_pb(backup_dir, 'replica') + backup_id = self.pb.backup_node('replica', replica, + options=['--stream']) + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', backup_id)['status']) # RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) + self.pb.restore_node('replica', node=node) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() @@ -187,23 +161,17 @@ def test_replica_stream_ptrack_backup(self): before = master.table_checksum("t_heap") - backup_id = self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) - self.validate_pb(backup_dir, 'replica') + backup_id = self.pb.backup_node('replica', replica, backup_type='ptrack', + options=['--stream']) + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', backup_id)['status']) # RESTORE PTRACK BACKUP TAKEN FROM replica node.cleanup() - self.restore_node( - backup_dir, 'replica', 
data_dir=node.data_dir, backup_id=backup_id) + self.pb.restore_node('replica', node, backup_id=backup_id) - self.set_auto_conf(node, {'port': node.port}) + node.set_auto_conf({'port': node.port}) node.slow_start() @@ -212,143 +180,118 @@ def test_replica_stream_ptrack_backup(self): self.assertEqual(before, after) # @unittest.skip("skip") + @needs_gdb def test_replica_archive_page_backup(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s', 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,2560) i") + master.pgbench_init(scale=5) - before = master.table_checksum("t_heap") + before = master.table_checksum("pgbench_accounts") - backup_id = self.backup_node( - backup_dir, 'master', master, backup_type='page') - self.restore_node(backup_dir, 'master', replica) + backup_id = self.pb.backup_node('master', master, backup_type='page') + self.pb.restore_node('master', node=replica) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.pb.set_archiving('replica', replica, replica=True) replica.slow_start(replica=True) # Check data correctness on replica - after = replica.table_checksum("t_heap") + after = replica.table_checksum("pgbench_accounts") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, # restore taken backup and check that restored data # equal to original data - master.psql( - "postgres", - "insert into t_heap select i as id, md5(i::text) as text, " - "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(256,25120) i") + pgbench = master.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum']) + pgbench.wait() - before = master.table_checksum("t_heap") + before = master.table_checksum("pgbench_accounts") self.wait_until_replica_catch_with_master(master, replica) - backup_id = self.backup_node( - backup_dir, 'replica', replica, - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + backup_id, _ = self.pb.backup_replica_node('replica', replica, + master=master, + options=['--archive-timeout=60']) - self.validate_pb(backup_dir, 
'replica') + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', backup_id)['status']) # RESTORE FULL BACKUP TAKEN FROM replica - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node')) + node = self.pg_node.make_simple('node') node.cleanup() - self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) + self.pb.restore_node('replica', node=node) - self.set_auto_conf(node, {'port': node.port, 'archive_mode': 'off'}) + node.set_auto_conf({'port': node.port, 'archive_mode': 'off'}) node.slow_start() # CHECK DATA CORRECTNESS - after = node.table_checksum("t_heap") + after = node.table_checksum("pgbench_accounts") self.assertEqual(before, after) node.cleanup() # Change data on master, make PAGE backup from replica, # restore taken backup and check that restored data equal # to original data - master.pgbench_init(scale=5) - pgbench = master.pgbench( - options=['-T', '30', '-c', '2', '--no-vacuum']) + options=['-T', '15', '-c', '1', '--no-vacuum']) - backup_id = self.backup_node( - backup_dir, 'replica', + backup_id, _ = self.pb.backup_replica_node('replica', replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + master=master, + options=['--archive-timeout=60']) pgbench.wait() - self.switch_wal_segment(master) + lsn = self.switch_wal_segment(master) before = master.table_checksum("pgbench_accounts") - self.validate_pb(backup_dir, 'replica') + self.pb.validate('replica') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) + 'OK', self.pb.show('replica', backup_id)['status']) # RESTORE PAGE BACKUP TAKEN FROM replica - self.restore_node( - backup_dir, 'replica', data_dir=node.data_dir, + self.pb.restore_node('replica', node, backup_id=backup_id) - self.set_auto_conf(node, {'port': node.port, 'archive_mode': 'off'}) + node.set_auto_conf({'port': node.port, 'archive_mode': 'off'}) node.slow_start() + self.wait_until_lsn_replayed(node, lsn) + # CHECK DATA CORRECTNESS - after = master.table_checksum("pgbench_accounts") + after = node.table_checksum("pgbench_accounts") self.assertEqual( before, after, 'Restored data is not equal to original') - self.add_instance(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.add_instance('node', node) + self.pb.backup_node('node', node, options=['--stream']) # @unittest.skip("skip") def test_basic_make_replica_via_restore(self): @@ -356,28 +299,21 @@ def test_basic_make_replica_via_restore(self): make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) 
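# Editor's sketch, not part of the patch: backups taken from a standby no
# longer spell out --master-host/--master-db/--master-port; the hunks above
# route them through pb.backup_replica_node, which takes the primary via
# master= and returns a pair (the second element is used as console output
# where a test needs to inspect the log). Call shape copied from the usage
# above; treat it as illustrative rather than a definitive signature.
backup_id, _ = self.pb.backup_replica_node(
    'replica', replica,
    master=master,
    options=['--archive-timeout=60'])
self.pb.validate('replica')
self.assertEqual('OK', self.pb.show('replica', backup_id)['status'])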
master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) master.psql( "postgres", @@ -387,20 +323,17 @@ def test_basic_make_replica_via_restore(self): before = master.table_checksum("t_heap") - backup_id = self.backup_node( - backup_dir, 'master', master, backup_type='page') - self.restore_node( - backup_dir, 'master', replica, options=['-R']) + backup_id = self.pb.backup_node('master', master, backup_type='page') + self.pb.restore_node('master', replica, options=['-R']) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.pb.add_instance('replica', replica) + self.pb.set_archiving('replica', replica, replica=True) self.set_replica(master, replica, synchronous=True) replica.slow_start(replica=True) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, options=['--archive-timeout=30s', '--stream']) # @unittest.skip("skip") @@ -410,27 +343,20 @@ def test_take_backup_from_delayed_replica(self): restore full backup as delayed replica, launch pgbench, take FULL, PAGE and DELTA backups from replica """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) master.psql( "postgres", @@ -444,22 +370,20 @@ def test_take_backup_from_delayed_replica(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,165000) i") - self.restore_node( - backup_dir, 'master', replica, options=['-R']) + self.pb.restore_node('master', replica, options=['-R']) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.pb.add_instance('replica', replica) + self.pb.set_archiving('replica', replica, replica=True) - self.set_auto_conf(replica, {'port': replica.port}) + replica.set_auto_conf({'port': replica.port}) replica.slow_start(replica=True) self.wait_until_replica_catch_with_master(master, replica) - if self.get_version(master) >= self.version_to_num('12.0'): - self.set_auto_conf( - replica, {'recovery_min_apply_delay': '300s'}) + if self.pg_config_version >= self.version_to_num('12.0'): + replica.set_auto_conf({'recovery_min_apply_delay': '300s'}) else: replica.append_conf( 'recovery.conf', @@ -473,19 +397,16 @@ def test_take_backup_from_delayed_replica(self): pgbench = master.pgbench( options=['-T', '60', '-c', '2', 
'--no-vacuum']) - self.backup_node( - backup_dir, 'replica', + self.pb.backup_node('replica', replica, options=['--archive-timeout=60s']) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, data_dir=replica.data_dir, backup_type='page', options=['--archive-timeout=60s']) sleep(1) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='delta', options=['--archive-timeout=60s']) pgbench.wait() @@ -493,52 +414,42 @@ def test_take_backup_from_delayed_replica(self): pgbench = master.pgbench( options=['-T', '30', '-c', '2', '--no-vacuum']) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, options=['--stream']) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='page', options=['--stream']) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='delta', options=['--stream']) pgbench.wait() # @unittest.skip("skip") + @needs_gdb def test_replica_promote(self): """ start backup from replica, during backup promote replica check that backup is failed """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s', 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) master.psql( "postgres", @@ -546,12 +457,11 @@ def test_replica_promote(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,165000) i") - self.restore_node( - backup_dir, 'master', replica, options=['-R']) + self.pb.restore_node('master', replica, options=['-R']) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) - self.set_archiving(backup_dir, 'replica', replica, replica=True) + self.pb.add_instance('replica', replica) + self.pb.set_archiving('replica', replica, replica=True) self.set_replica( master, replica, replica_name='replica', synchronous=True) @@ -566,8 +476,7 @@ def test_replica_promote(self): self.wait_until_replica_catch_with_master(master, replica) # start backup from replica - gdb = self.backup_node( - backup_dir, 'replica', replica, gdb=True, + gdb = self.pb.backup_node('replica', replica, gdb=True, options=['--log-level-file=verbose']) gdb.set_breakpoint('backup_data_file') @@ -576,16 +485,12 @@ def test_replica_promote(self): replica.promote() - gdb.remove_all_breakpoints() gdb.continue_execution_until_exit() - backup_id = self.show_pb( - backup_dir, 'replica')[0]["id"] + backup_id = self.pb.show('replica')[0]["id"] # read log file content - with 
open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f: - log_content = f.read() - f.close + log_content = self.read_pb_log() self.assertIn( 'ERROR: the standby was promoted during online backup', @@ -597,52 +502,44 @@ def test_replica_promote(self): log_content) # @unittest.skip("skip") + @needs_gdb def test_replica_stop_lsn_null_offset(self): """ """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + self.test_env["PGPROBACKUP_TESTS_SKIP_EMPTY_COMMIT"] = "ON" + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master) + self.pb.init() + self.pb.add_instance('node', master) + self.pb.set_archiving('node', master) master.slow_start() # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) + gdb_bgwriter = self.gdb_attach(bgwriter_pid) - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) # Settings for Replica self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) + self.pb.set_archiving('node', replica, replica=True) replica.slow_start(replica=True) self.switch_wal_segment(master) self.switch_wal_segment(master) - output = self.backup_node( - backup_dir, 'node', replica, replica.data_dir, + output = self.pb.backup_node('node', replica, replica.data_dir, options=[ '--archive-timeout=30', '--log-level-console=LOG', @@ -650,25 +547,9 @@ def test_replica_stop_lsn_null_offset(self): '--stream'], return_id=False) - self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - output) - - self.assertIn( - 'WARNING: WAL segment 000000010000000000000004 could not be streamed in 30 seconds', - output) - - self.assertIn( - 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', - output) - - self.assertIn( - 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', - output) - self.assertIn( 'has endpoint 0/4000000 which is ' - 'equal or greater than requested LSN 0/4000000', + 'equal or greater than requested LSN', output) self.assertIn( @@ -676,62 +557,48 @@ def test_replica_stop_lsn_null_offset(self): output) # Clean after yourself - gdb_checkpointer.kill() + gdb_bgwriter.detach() # @unittest.skip("skip") + @needs_gdb def test_replica_stop_lsn_null_offset_next_record(self): """ """ - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + self.test_env["PGPROBACKUP_TESTS_SKIP_EMPTY_COMMIT"] = "ON" + backup_dir = 
self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + gdb_bgwriter = self.gdb_attach(bgwriter_pid) - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) + self.pb.set_archiving('replica', replica, replica=True) replica.slow_start(replica=True) - self.switch_wal_segment(master) - self.switch_wal_segment(master) - # open connection to master conn = master.connect() - gdb = self.backup_node( - backup_dir, 'replica', replica, + gdb = self.pb.backup_node('replica', replica, options=[ '--archive-timeout=40', '--log-level-file=LOG', @@ -740,255 +607,189 @@ def test_replica_stop_lsn_null_offset_next_record(self): gdb=True) # Attention! 
this breakpoint is set to a probackup internal function, not a postgres core one - gdb.set_breakpoint('pg_stop_backup') + gdb.set_breakpoint('pg_stop_backup_consume') gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb.continue_execution_until_running() - - sleep(5) conn.execute("create table t1()") conn.commit() - while 'RUNNING' in self.show_pb(backup_dir, 'replica')[0]['status']: - sleep(5) + sleep(5) - file = os.path.join(backup_dir, 'log', 'pg_probackup.log') + gdb.continue_execution_until_exit() - with open(file) as f: - log_content = f.read() + log_content = self.read_pb_log() self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', + 'has endpoint 0/4000000 which is ' + 'equal or greater than requested LSN', log_content) self.assertIn( - 'LOG: Looking for segment: 000000010000000000000004', + 'LOG: Found prior LSN:', log_content) self.assertIn( - 'LOG: First record in WAL segment "000000010000000000000004": 0/4000028', + 'INFO: backup->stop_lsn 0/4000000', log_content) - self.assertIn( - 'INFO: stop_lsn: 0/4000000', - log_content) + self.assertTrue(self.pb.show('replica')[0]['status'] == 'DONE') - self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') + gdb_bgwriter.detach() # @unittest.skip("skip") + @needs_gdb def test_archive_replica_null_offset(self): """ """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master) + self.pb.init() + self.pb.add_instance('node', master) + self.pb.set_archiving('node', master) master.slow_start() - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) # Settings for Replica self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) + self.pb.set_archiving('node', replica, replica=True) # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) + gdb_bgwriter = self.gdb_attach(bgwriter_pid) replica.slow_start(replica=True) - self.switch_wal_segment(master) self.switch_wal_segment(master) # take backup from replica - output = self.backup_node( - backup_dir, 'node', replica, replica.data_dir, + _, output = self.pb.backup_replica_node('node', replica, replica.data_dir, + master=master, options=[ - '--archive-timeout=30', - '--log-level-console=LOG', + '--archive-timeout=300', + '--log-level-file=LOG', '--no-validate'], - return_id=False) - - self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - output) - - self.assertIn( - 'WARNING: WAL segment 000000010000000000000004 could not be archived in 30 
seconds', - output) - - self.assertIn( - 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', - output) + ) - self.assertIn( - 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', - output) + self.assertRegex( + output, + r'LOG: Looking for LSN 0/[45]000000 in segment: 00000001000000000000000[34]') - self.assertIn( - 'has endpoint 0/4000000 which is ' - 'equal or greater than requested LSN 0/4000000', - output) + self.assertRegex( + output, + r'has endpoint 0/[45]000000 which is ' + r'equal or greater than requested LSN 0/[45]000000') self.assertIn( 'LOG: Found prior LSN:', output) - print(output) + gdb_bgwriter.detach() # @unittest.skip("skip") + @needs_gdb def test_archive_replica_not_null_offset(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ + 'archive_timeout' : '1h', 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', master) - self.set_archiving(backup_dir, 'node', master) + self.pb.init() + self.pb.add_instance('node', master) + self.pb.set_archiving('node', master) master.slow_start() - self.backup_node(backup_dir, 'node', master) + self.pb.backup_node('node', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'node', replica) + self.pb.restore_node('node', node=replica) # Settings for Replica self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'node', replica, replica=True) + self.pb.set_archiving('node', replica, replica=True) replica.slow_start(replica=True) # take backup from replica - self.backup_node( - backup_dir, 'node', replica, replica.data_dir, + self.pb.backup_replica_node('node', replica, replica.data_dir, + master=master, options=[ - '--archive-timeout=30', - '--log-level-console=LOG', + '--archive-timeout=300', '--no-validate'], - return_id=False) + ) + + master.execute('select txid_current()') + self.wait_until_replica_catch_with_master(master, replica) - try: - self.backup_node( - backup_dir, 'node', replica, replica.data_dir, + output = self.pb.backup_node('node', replica, replica.data_dir, options=[ - '--archive-timeout=30', + '--archive-timeout=10', '--log-level-console=LOG', - '--no-validate']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of archive timeout. 
" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - # vanilla -- 0/4000060 - # pgproee -- 0/4000078 - self.assertRegex( - e.message, - r'LOG: Looking for LSN (0/4000060|0/4000078) in segment: 000000010000000000000004', - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertRegex( - e.message, - r'INFO: Wait for LSN (0/4000060|0/4000078) in archived WAL segment', - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - 'ERROR: WAL segment 000000010000000000000004 could not be archived in 30 seconds', - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + '--log-level-file=LOG', + '--no-validate'], + expect_error=True) + + self.assertMessage(output, regex=r'LOG: Looking for LSN 0/[45]0000(?!00)[A-F\d]{2} in segment: 0*10*[45]') + + self.assertMessage(output, regex=r'ERROR: WAL segment 0*10*[45] could not be archived in \d+ seconds') # @unittest.skip("skip") + @needs_gdb def test_replica_toast(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) - self.set_archiving(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) + self.pb.set_archiving('master', master) master.slow_start() # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) + gdb_bgwriter = self.gdb_attach(bgwriter_pid) - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) - self.set_archiving(backup_dir, 'replica', replica, replica=True) - - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) + self.pb.set_archiving('replica', replica, replica=True) replica.slow_start(replica=True) - self.switch_wal_segment(master) - self.switch_wal_segment(master) - master.safe_psql( 'postgres', 'CREATE TABLE t1 AS ' @@ -997,8 +798,7 @@ def test_replica_toast(self): self.wait_until_replica_catch_with_master(master, replica) - output = self.backup_node( - backup_dir, 'replica', replica, + output = self.pb.backup_node('replica', replica, options=[ '--archive-timeout=30', '--log-level-console=LOG', @@ -1008,10 +808,6 @@ def 
test_replica_toast(self): pgdata = self.pgdata_content(replica.data_dir) - self.assertIn( - 'WARNING: Could not read WAL record at', - output) - self.assertIn( 'LOG: Found prior LSN:', output) @@ -1022,7 +818,7 @@ def test_replica_toast(self): replica.cleanup() - self.restore_node(backup_dir, 'replica', replica) + self.pb.restore_node('replica', node=replica) pgdata_restored = self.pgdata_content(replica.data_dir) replica.slow_start() @@ -1036,44 +832,39 @@ def test_replica_toast(self): self.compare_pgdata(pgdata, pgdata_restored) # Clean after yourself - gdb_checkpointer.kill() + gdb_bgwriter.detach() # @unittest.skip("skip") + @needs_gdb def test_start_stop_lsn_in_the_same_segno(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] + gdb_bgwriter = self.gdb_attach(bgwriter_pid) - self.backup_node(backup_dir, 'master', master, options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # Settings for Replica - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) self.set_replica(master, replica, synchronous=True) replica.slow_start(replica=True) @@ -1095,8 +886,7 @@ def test_start_stop_lsn_in_the_same_segno(self): sleep(60) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, options=[ '--archive-timeout=30', '--log-level-console=LOG', @@ -1104,8 +894,7 @@ def test_start_stop_lsn_in_the_same_segno(self): '--stream'], return_id=False) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, options=[ '--archive-timeout=30', '--log-level-console=LOG', @@ -1113,36 +902,31 @@ def test_start_stop_lsn_in_the_same_segno(self): '--stream'], return_id=False) + gdb_bgwriter.detach() + @unittest.skip("skip") def test_replica_promote_1(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + self.pb.init() + 
self.pb.add_instance('master', master) # set replica True, so archive_mode 'always' is used. - self.set_archiving(backup_dir, 'master', master, replica=True) + self.pb.set_archiving('master', master, replica=True) master.slow_start() - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # Settings for Replica self.set_replica(master, replica) @@ -1157,18 +941,14 @@ def test_replica_promote_1(self): self.wait_until_replica_catch_with_master(master, replica) - wal_file = os.path.join( - backup_dir, 'wal', 'master', '000000010000000000000004') - - wal_file_partial = os.path.join( - backup_dir, 'wal', 'master', '000000010000000000000004.partial') + self.assertFalse( + self.instance_wal_exists(backup_dir, master, '000000010000000000000004')) - self.assertFalse(os.path.exists(wal_file)) + wal_file_partial = '000000010000000000000004.partial' replica.promote() - while not os.path.exists(wal_file_partial): - sleep(1) + self.wait_instance_wal_exists(backup_dir, 'master', wal_file_partial) self.switch_wal_segment(master) @@ -1176,41 +956,33 @@ def test_replica_promote_1(self): sleep(70) self.assertTrue( - os.path.exists(wal_file_partial), - "File {0} disappeared".format(wal_file)) - - self.assertTrue( - os.path.exists(wal_file_partial), + self.instance_wal_exists(backup_dir, 'master', wal_file_partial), "File {0} disappeared".format(wal_file_partial)) # @unittest.skip("skip") def test_replica_promote_2(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) # set replica True, so archive_mode 'always' is used. 
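# Editor's sketch, not part of the patch: checks against the WAL archive no
# longer build os.path.join(backup_dir, 'wal', ...) paths; the promote test
# above asks the backup catalog through helpers instead. Argument
# conventions are copied verbatim from that hunk (note it passes the node
# object in the first call and the instance name in the later ones).
wal_file_partial = '000000010000000000000004.partial'
self.assertFalse(
    self.instance_wal_exists(backup_dir, master, '000000010000000000000004'))

replica.promote()
self.wait_instance_wal_exists(backup_dir, 'master', wal_file_partial)
self.switch_wal_segment(master)

self.assertTrue(
    self.instance_wal_exists(backup_dir, 'master', wal_file_partial),
    "File {0} disappeared".format(wal_file_partial))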
- self.set_archiving( - backup_dir, 'master', master, replica=True) + self.pb.set_archiving('master', master, replica=True) master.slow_start() - self.backup_node(backup_dir, 'master', master) + self.pb.backup_node('master', master) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # Settings for Replica self.set_replica(master, replica) - self.set_auto_conf(replica, {'port': replica.port}) + replica.set_auto_conf({'port': replica.port}) replica.slow_start(replica=True) @@ -1224,8 +996,7 @@ def test_replica_promote_2(self): replica.promote() - self.backup_node( - backup_dir, 'master', replica, data_dir=replica.data_dir, + self.pb.backup_node('master', replica, data_dir=replica.data_dir, backup_type='page') # @unittest.skip("skip") @@ -1235,82 +1006,58 @@ def test_replica_promote_archive_delta(self): t2 /-------> t1 --F---D1--D2-- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), + backup_dir = self.backup_dir + node1 = self.pg_node.make_simple('node1', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node1) - self.set_config( - backup_dir, 'node', options=['--archive-timeout=60s']) - self.set_archiving(backup_dir, 'node', node1) + self.pb.init() + self.pb.add_instance('node', node1) + self.pb.set_config('node', options=['--archive-timeout=60s']) + self.pb.set_archiving('node', node1) node1.slow_start() - self.backup_node(backup_dir, 'node', node1, options=['--stream']) + self.pb.backup_node('node', node1, options=['--stream']) # Create replica - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() - self.restore_node(backup_dir, 'node', node2, node2.data_dir) + self.pb.restore_node('node', node=node2) # Settings for Replica self.set_replica(node1, node2) - self.set_auto_conf(node2, {'port': node2.port}) - self.set_archiving(backup_dir, 'node', node2, replica=True) + node2.set_auto_conf({'port': node2.port}) + self.pb.set_archiving('node', node2, replica=True) node2.slow_start(replica=True) - node1.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') + create_table(node1, 't1') self.wait_until_replica_catch_with_master(node1, node2) - node1.safe_psql( - 'postgres', - 'CREATE TABLE t2 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') + create_table(node1, 't2') self.wait_until_replica_catch_with_master(node1, node2) # delta backup on replica on timeline 1 - delta1_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, + delta1_id = self.pb.backup_node('node', node2, node2.data_dir, 'delta', options=['--stream']) # delta backup on replica on timeline 1 - delta2_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, 'delta') + delta2_id = self.pb.backup_node('node', node2, node2.data_dir, 
'delta') - self.change_backup_status( - backup_dir, 'node', delta2_id, 'ERROR') + self.change_backup_status(backup_dir, 'node', delta2_id, 'ERROR') # node2 is now master node2.promote() - node2.safe_psql( - 'postgres', - 'CREATE TABLE t3 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') + create_table(node2, 't3') # node1 is now replica node1.cleanup() # kludge "backup_id=delta1_id" - self.restore_node( - backup_dir, 'node', node1, node1.data_dir, + self.pb.restore_node('node', node1, backup_id=delta1_id, options=[ '--recovery-target-timeline=2', @@ -1318,16 +1065,12 @@ def test_replica_promote_archive_delta(self): # Settings for Replica self.set_replica(node2, node1) - self.set_auto_conf(node1, {'port': node1.port}) - self.set_archiving(backup_dir, 'node', node1, replica=True) + node1.set_auto_conf({'port': node1.port}) + self.pb.set_archiving('node', node1, replica=True) node1.slow_start(replica=True) - node2.safe_psql( - 'postgres', - 'CREATE TABLE t4 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,30) i') + create_table(node2, 't4') self.wait_until_replica_catch_with_master(node2, node1) # node1 is back to be a master @@ -1336,14 +1079,13 @@ def test_replica_promote_archive_delta(self): sleep(5) # delta backup on timeline 3 - self.backup_node( - backup_dir, 'node', node1, node1.data_dir, 'delta', + self.pb.backup_node('node', node1, node1.data_dir, 'delta', options=['--archive-timeout=60']) pgdata = self.pgdata_content(node1.data_dir) node1.cleanup() - self.restore_node(backup_dir, 'node', node1, node1.data_dir) + self.pb.restore_node('node', node=node1) pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -1355,82 +1097,58 @@ def test_replica_promote_archive_page(self): t2 /-------> t1 --F---P1--P2-- """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), + backup_dir = self.backup_dir + node1 = self.pg_node.make_simple('node1', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node1) - self.set_archiving(backup_dir, 'node', node1) - self.set_config( - backup_dir, 'node', options=['--archive-timeout=60s']) + self.pb.init() + self.pb.add_instance('node', node1) + self.pb.set_archiving('node', node1) + self.pb.set_config('node', options=['--archive-timeout=60s']) node1.slow_start() - self.backup_node(backup_dir, 'node', node1, options=['--stream']) + self.pb.backup_node('node', node1, options=['--stream']) # Create replica - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() - self.restore_node(backup_dir, 'node', node2, node2.data_dir) + self.pb.restore_node('node', node=node2) # Settings for Replica self.set_replica(node1, node2) - self.set_auto_conf(node2, {'port': node2.port}) - self.set_archiving(backup_dir, 'node', node2, replica=True) + node2.set_auto_conf({'port': node2.port}) + self.pb.set_archiving('node', node2, replica=True) node2.slow_start(replica=True) - node1.safe_psql( - 'postgres', - 'CREATE TABLE t1 AS ' - 'SELECT i, 
repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') + create_table(node1, 't1') self.wait_until_replica_catch_with_master(node1, node2) - node1.safe_psql( - 'postgres', - 'CREATE TABLE t2 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') + create_table(node1, 't2') self.wait_until_replica_catch_with_master(node1, node2) # page backup on replica on timeline 1 - page1_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, + page1_id = self.pb.backup_node('node', node2, node2.data_dir, 'page', options=['--stream']) # page backup on replica on timeline 1 - page2_id = self.backup_node( - backup_dir, 'node', node2, node2.data_dir, 'page') + page2_id = self.pb.backup_node('node', node2, node2.data_dir, 'page') - self.change_backup_status( - backup_dir, 'node', page2_id, 'ERROR') + self.change_backup_status(backup_dir, 'node', page2_id, 'ERROR') # node2 is now master node2.promote() - node2.safe_psql( - 'postgres', - 'CREATE TABLE t3 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,20) i') + create_table(node2, 't3') # node1 is now replica node1.cleanup() # kludge "backup_id=page1_id" - self.restore_node( - backup_dir, 'node', node1, node1.data_dir, + self.pb.restore_node('node', node1, backup_id=page1_id, options=[ '--recovery-target-timeline=2', @@ -1438,16 +1156,12 @@ def test_replica_promote_archive_page(self): # Settings for Replica self.set_replica(node2, node1) - self.set_auto_conf(node1, {'port': node1.port}) - self.set_archiving(backup_dir, 'node', node1, replica=True) + node1.set_auto_conf({'port': node1.port}) + self.pb.set_archiving('node', node1, replica=True) node1.slow_start(replica=True) - node2.safe_psql( - 'postgres', - 'CREATE TABLE t4 AS ' - 'SELECT i, repeat(md5(i::text),5006056) AS fat_attr ' - 'FROM generate_series(0,30) i') + create_table(node2, 't4') self.wait_until_replica_catch_with_master(node2, node1) # node1 is back to be a master @@ -1456,17 +1170,16 @@ def test_replica_promote_archive_page(self): sleep(5) - # delta3_id = self.backup_node( - # backup_dir, 'node', node2, node2.data_dir, 'delta') + # delta3_id = self.pb.backup_node( + # 'node', node2, node2.data_dir, 'delta') # page backup on timeline 3 - page3_id = self.backup_node( - backup_dir, 'node', node1, node1.data_dir, 'page', + page3_id = self.pb.backup_node('node', node1, node1.data_dir, 'page', options=['--archive-timeout=60']) pgdata = self.pgdata_content(node1.data_dir) node1.cleanup() - self.restore_node(backup_dir, 'node', node1, node1.data_dir) + self.pb.restore_node('node', node=node1) pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -1475,32 +1188,25 @@ def test_replica_promote_archive_page(self): def test_parent_choosing(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - master = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'master'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + master = self.pg_node.make_simple('master', + set_replication=True) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.skipTest( - 'Skipped because backup from replica is not supported in PG 9.5') - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'master', master) + self.pb.init() + self.pb.add_instance('master', master) master.slow_start() - self.backup_node(backup_dir, 'master', master, 
options=['--stream']) + self.pb.backup_node('master', master, options=['--stream']) # Create replica - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() - self.restore_node(backup_dir, 'master', replica) + self.pb.restore_node('master', node=replica) # Settings for Replica self.set_replica(master, replica) - self.set_auto_conf(replica, {'port': replica.port}) + replica.set_auto_conf({'port': replica.port}) replica.slow_start(replica=True) @@ -1511,10 +1217,9 @@ def test_parent_choosing(self): 'FROM generate_series(0,20) i') self.wait_until_replica_catch_with_master(master, replica) - self.add_instance(backup_dir, 'replica', replica) + self.pb.add_instance('replica', replica) - full_id = self.backup_node( - backup_dir, 'replica', + full_id = self.pb.backup_node('replica', replica, options=['--stream']) master.safe_psql( @@ -1524,82 +1229,64 @@ def test_parent_choosing(self): 'FROM generate_series(0,20) i') self.wait_until_replica_catch_with_master(master, replica) - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='delta', options=['--stream']) replica.promote() # failing, because without archving, it is impossible to # take multi-timeline backup. - self.backup_node( - backup_dir, 'replica', replica, + self.pb.backup_node('replica', replica, backup_type='delta', options=['--stream']) # @unittest.skip("skip") def test_instance_from_the_past(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - full_id = self.backup_node(backup_dir, 'node', node, options=['--stream']) + full_id = self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=10) - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.cleanup() - self.restore_node(backup_dir, 'node', node, backup_id=full_id) + self.pb.restore_node('node', node=node, backup_id=full_id) node.slow_start() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because instance is from the past " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Current START LSN' in e.message and - 'is lower than START LSN' in e.message and - 'It may indicate that we are trying to backup ' - 'PostgreSQL instance from the past' in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, backup_type='delta', + options=['--stream'], + expect_error="because instance is from the past") + self.assertMessage(regex='ERROR: Current START LSN .* is lower than START LSN') + self.assertMessage(contains='It may indicate that we are trying to backup ' + 'PostgreSQL instance from the past') # @unittest.skip("skip") def test_replica_via_basebackup(self): """ """ - 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'hot_standby': 'on'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=10) #FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) pgbench = node.pgbench( options=['-T', '10', '-c', '1', '--no-vacuum']) @@ -1607,48 +1294,54 @@ def test_replica_via_basebackup(self): node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=['--recovery-target=latest', '--recovery-target-action=promote']) node.slow_start() # Timeline 2 # Take stream page backup from instance in timeline2 - self.backup_node( - backup_dir, 'node', node, backup_type='full', + self.pb.backup_node('node', node, backup_type='full', options=['--stream', '--log-level-file=verbose']) node.cleanup() # restore stream backup - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) - xlog_dir = 'pg_wal' - if self.get_version(node) < 100000: - xlog_dir = 'pg_xlog' - - filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") + filepath = os.path.join(node.data_dir, 'pg_wal', "00000002.history") self.assertTrue( os.path.exists(filepath), "History file do not exists: {0}".format(filepath)) node.slow_start() - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() pg_basebackup_path = self.get_bin_path('pg_basebackup') - self.run_binary( + self.pb.run_binary( [ pg_basebackup_path, '-p', str(node.port), '-h', 'localhost', '-R', '-X', 'stream', '-D', node_restored.data_dir ]) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start(replica=True) +def call_repeat(times, func, *args): + for i in range(times): + func(*args) + +def create_table(node, name): + node.safe_psql( + 'postgres', + f"CREATE TABLE {name} AS " + "SELECT i, v as fat_attr " + "FROM generate_series(0,3) i, " + " (SELECT string_agg(md5(j::text), '') as v" + " FROM generate_series(0,500605) as j) v") + # TODO: # null offset STOP LSN and latest record in previous segment is conrecord (manual only) # archiving from promoted delayed replica diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 000000000..eea7b8c42 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,16 @@ +# Testgres can be installed in the following ways: +# 1. From a pip package (recommended) +# testgres==1.8.5 +# 2. From a specific Git branch, tag or commit +# git+https://github.com/postgrespro/testgres.git@ +# 3. 
From a local directory +# /path/to/local/directory/testgres +testgres==1.10.0 +testgres-pg-probackup2==0.0.2 +allure-pytest +deprecation +minio +pexpect +pytest==7.4.3 +pytest-xdist +parameterized diff --git a/tests/restore_test.py b/tests/restore_test.py index 67e99515c..798002944 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -1,32 +1,32 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class +from pg_probackup2.gdb import needs_gdb import subprocess -import sys from time import sleep from datetime import datetime, timedelta, timezone -import hashlib import shutil import json -from shutil import copyfile from testgres import QueryException, StartNodeException -from stat import S_ISDIR +import testgres.utils as testgres_utils +import re -class RestoreTest(ProbackupTest, unittest.TestCase): + +MAGIC_COUNT = 107183 + + +class RestoreTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure def test_restore_full_to_latest(self): """recovery to latest from full backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -35,22 +35,18 @@ def test_restore_full_to_latest(self): pgbench.wait() pgbench.stdout.close() before = node.table_checksum("pgbench_branches") - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node.stop() node.cleanup() # 1 - Test recovery from latest - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) # 2 - Test that recovery.conf was created # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') with open(recovery_conf, 'r') as f: print(f.read()) @@ -66,39 +62,31 @@ def test_restore_full_to_latest(self): # @unittest.skip("skip") def test_restore_full_page_to_latest(self): """recovery to latest from full + page backups""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() 
pgbench.stdout.close() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") + backup_id = self.pb.backup_node('node', node, backup_type="page") before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() @@ -108,33 +96,26 @@ def test_restore_full_page_to_latest(self): # @unittest.skip("skip") def test_restore_to_specific_timeline(self): """recovery to target timeline""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) before = node.table_checksum("pgbench_branches") - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) target_tli = int( node.get_control_data()["Latest checkpoint's TimeLineID"]) node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() pgbench = node.pgbench( @@ -143,24 +124,19 @@ def test_restore_to_specific_timeline(self): pgbench.wait() pgbench.stdout.close() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.stop() node.cleanup() # Correct Backup must be choosen for restore - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "-j", "4", "--timeline={0}".format(target_tli)] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) - recovery_target_timeline = self.get_recovery_conf( - node)["recovery_target_timeline"] + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] self.assertEqual(int(recovery_target_timeline), target_tli) node.slow_start() @@ -170,25 +146,21 @@ def test_restore_to_specific_timeline(self): # @unittest.skip("skip") def test_restore_to_time(self): """recovery to target time""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={'TimeZone': 'GMT'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', 
node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) before = node.table_checksum("pgbench_branches") - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) - target_time = node.execute( - "postgres", "SELECT to_char(now(), 'YYYY-MM-DD HH24:MI:SS+00')" - )[0][0] + node.safe_psql("postgres", "select txid_current()") + target_time = node.safe_psql("postgres", "SELECT current_timestamp").decode('utf-8').strip() pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() @@ -197,17 +169,13 @@ def test_restore_to_time(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ - "-j", "4", '--time={0}'.format(target_time), + "-j", "4", '--recovery-target-time={0}'.format(target_time), "--recovery-target-action=promote" ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() after = node.table_checksum("pgbench_branches") @@ -216,14 +184,11 @@ def test_restore_to_time(self): # @unittest.skip("skip") def test_restore_to_xid_inclusive(self): """recovery to target xid""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -231,7 +196,7 @@ def test_restore_to_xid_inclusive(self): con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -252,17 +217,12 @@ def test_restore_to_xid_inclusive(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "-j", "4", '--xid={0}'.format(target_xid), "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) @@ -272,14 +232,11 @@ def test_restore_to_xid_inclusive(self): # @unittest.skip("skip") def test_restore_to_xid_not_inclusive(self): """recovery with target inclusive false""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', 
node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -287,7 +244,7 @@ def test_restore_to_xid_not_inclusive(self): con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -308,17 +265,13 @@ def test_restore_to_xid_not_inclusive(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "-j", "4", '--xid={0}'.format(target_xid), "--inclusive=false", - "--recovery-target-action=promote"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "--recovery-target-action=promote"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() after = node.table_checksum("pgbench_branches") @@ -329,17 +282,12 @@ def test_restore_to_lsn_inclusive(self): # @unittest.skip("skip") def test_restore_to_lsn_inclusive(self): """recovery to target lsn""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('10.0'): - return + node = self.pg_node.make_simple('node') + node.set_auto_conf({"autovacuum": "off"}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -347,7 +295,7 @@ def test_restore_to_lsn_inclusive(self): con.execute("CREATE TABLE tbl0005 (a int)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -358,13 +306,13 @@ def test_restore_to_lsn_inclusive(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - res = con.execute("SELECT pg_current_wal_lsn()") - con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") + # With high probability, returned lsn will point at COMMIT start + # If this test is still flaky, get lsn after commit and add + # one more xlog record (for example, with txid_current()+abort).
+ res = con.execute("SELECT pg_current_wal_insert_lsn()") con.commit() - xlogid, xrecoff = res[0][0].split('/') - xrecoff = hex(int(xrecoff, 16) + 1)[2:] - target_lsn = "{0}/{1}".format(xlogid, xrecoff) + target_lsn = res[0][0] pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -374,16 +322,12 @@ def test_restore_to_lsn_inclusive(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "-j", "4", '--lsn={0}'.format(target_lsn), "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() @@ -395,17 +339,11 @@ def test_restore_to_lsn_inclusive(self): # @unittest.skip("skip") def test_restore_to_lsn_not_inclusive(self): """recovery to target lsn""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - if self.get_version(node) < self.version_to_num('10.0'): - return + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=2) @@ -413,7 +351,7 @@ def test_restore_to_lsn_not_inclusive(self): con.execute("CREATE TABLE tbl0005 (a int)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -424,13 +362,13 @@ def test_restore_to_lsn_not_inclusive(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - res = con.execute("SELECT pg_current_wal_lsn()") - con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") + # Returned lsn will certainly point at COMMIT start OR BEFORE IT, + # if some background activity wrote record in between INSERT and + # COMMIT. Any way, test should succeed. 
+ res = con.execute("SELECT pg_current_wal_insert_lsn()") con.commit() - xlogid, xrecoff = res[0][0].split('/') - xrecoff = hex(int(xrecoff, 16) + 1)[2:] - target_lsn = "{0}/{1}".format(xlogid, xrecoff) + target_lsn = res[0][0] pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -440,17 +378,13 @@ def test_restore_to_lsn_not_inclusive(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "--inclusive=false", "-j", "4", '--lsn={0}'.format(target_lsn), "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() @@ -465,15 +399,12 @@ def test_restore_full_ptrack_archive(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -482,29 +413,23 @@ def test_restore_full_ptrack_archive(self): node.pgbench_init(scale=2) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="ptrack") + backup_id = self.pb.backup_node('node', node, backup_type="ptrack") before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - + restore_result = self.pb.restore_node('node', node, + options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) @@ -515,15 +440,12 @@ def test_restore_ptrack(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -532,35 +454,30 @@ def test_restore_ptrack(self): node.pgbench_init(scale=2) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - 
self.backup_node(backup_dir, 'node', node, backup_type="ptrack") + self.pb.backup_node('node', node, backup_type="ptrack") pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="ptrack") + backup_id = self.pb.backup_node('node', node, backup_type="ptrack") before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, + options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() after = node.table_checksum("pgbench_branches") @@ -572,16 +489,13 @@ def test_restore_full_ptrack_stream(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -590,15 +504,14 @@ def test_restore_full_ptrack_stream(self): node.pgbench_init(scale=2) - self.backup_node(backup_dir, 'node', node, options=["--stream"]) + self.pb.backup_node('node', node, options=["--stream"]) pgbench = node.pgbench( stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type="ptrack", options=["--stream"]) before = node.table_checksum("pgbench_branches") @@ -606,12 +519,8 @@ def test_restore_full_ptrack_stream(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() after = node.table_checksum("pgbench_branches") @@ -626,16 +535,13 @@ def test_restore_full_ptrack_under_load(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -644,7 +550,7 @@ def 
test_restore_full_ptrack_under_load(self): node.pgbench_init(scale=2) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -652,8 +558,7 @@ def test_restore_full_ptrack_under_load(self): options=["-c", "4", "-T", "8"] ) - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type="ptrack", options=["--stream"]) pgbench.wait() @@ -668,12 +573,8 @@ def test_restore_full_ptrack_under_load(self): node.stop() node.cleanup() - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() bbalance = node.execute( @@ -691,16 +592,13 @@ def test_restore_full_under_load_ptrack(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums']) + ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -716,13 +614,12 @@ def test_restore_full_under_load_ptrack(self): options=["-c", "4", "-T", "8"] ) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench.wait() pgbench.stdout.close() - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type="ptrack", options=["--stream"]) bbalance = node.execute( @@ -734,14 +631,9 @@ def test_restore_full_under_load_ptrack(self): node.stop() node.cleanup() - # self.wrong_wal_clean(node, wal_segment_size) - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, options=["-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() bbalance = node.execute( "postgres", "SELECT sum(bbalance) FROM pgbench_branches") @@ -752,14 +644,11 @@ def test_restore_full_under_load_ptrack(self): # @unittest.skip("skip") def test_restore_with_tablespace_mapping_1(self): """recovery using tablespace-mapping option""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Create tablespace @@ -773,45 +662,23 @@ def 
test_restore_with_tablespace_mapping_1(self): con.execute("INSERT INTO test VALUES (1)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + backup_id = self.pb.backup_node('node', node) + self.assertEqual(self.pb.show('node')[0]['status'], "OK") # 1 - Try to restore to existing directory node.stop() - try: - self.restore_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because restore destination is not empty.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Restore destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + expect_error="because restore destination is not empty") + self.assertMessage(contains='ERROR: Restore destination is not empty:') # 2 - Try to restore to existing tablespace directory tblspc_path_tmp = os.path.join(node.base_dir, "tblspc_tmp") os.rename(tblspc_path, tblspc_path_tmp) - node.cleanup() + shutil.rmtree(node.data_dir) os.rename(tblspc_path_tmp, tblspc_path) - try: - self.restore_node(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because restore tablespace destination is " - "not empty.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Restore tablespace destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + expect_error="because restore tablespace destination is not empty") + self.assertMessage(contains='ERROR: Restore tablespace destination is not empty:') # 3 - Restore using tablespace-mapping to not empty directory tblspc_path_temp = os.path.join(node.base_dir, "tblspc_temp") @@ -819,34 +686,18 @@ def test_restore_with_tablespace_mapping_1(self): with open(os.path.join(tblspc_path_temp, 'file'), 'w+') as f: f.close() - try: - self.restore_node( - backup_dir, 'node', node, - options=["-T", "%s=%s" % (tblspc_path, tblspc_path_temp)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because restore tablespace destination is " - "not empty.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Restore tablespace destination is not empty:', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, + options=["-T", f"{tblspc_path}={tblspc_path_temp}"], + expect_error="because restore tablespace destination is not empty") + self.assertMessage(contains='ERROR: Restore tablespace destination is not empty:') # 4 - Restore using tablespace-mapping tblspc_path_new = os.path.join(node.base_dir, "tblspc_new") - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "-T", "%s=%s" % (tblspc_path, tblspc_path_new)] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, 
contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() @@ -854,29 +705,24 @@ def test_restore_with_tablespace_mapping_1(self): self.assertEqual(result[0][0], 1) # 4 - Restore using tablespace-mapping using page backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) with node.connect("postgres") as con: con.execute("INSERT INTO test VALUES (2)") con.commit() - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") + backup_id = self.pb.backup_node('node', node, backup_type="page") - show_pb = self.show_pb(backup_dir, 'node') + show_pb = self.pb.show('node') self.assertEqual(show_pb[1]['status'], "OK") self.assertEqual(show_pb[2]['status'], "OK") node.stop() - node.cleanup() + shutil.rmtree(node.data_dir) tblspc_path_page = os.path.join(node.base_dir, "tblspc_page") - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ - "-T", "%s=%s" % (tblspc_path_new, tblspc_path_page)]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "-T", "%s=%s" % (tblspc_path_new, tblspc_path_page)]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() result = node.execute("postgres", "SELECT id FROM test OFFSET 1") @@ -885,19 +731,16 @@ def test_restore_with_tablespace_mapping_1(self): # @unittest.skip("skip") def test_restore_with_tablespace_mapping_2(self): """recovery using tablespace-mapping option and page backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Full backup - self.backup_node(backup_dir, 'node', node) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") + self.pb.backup_node('node', node) + self.assertEqual(self.pb.show('node')[0]['status'], "OK") # Create tablespace tblspc_path = os.path.join(node.base_dir, "tblspc") @@ -912,16 +755,16 @@ def test_restore_with_tablespace_mapping_2(self): con.commit() # First page backup - self.backup_node(backup_dir, 'node', node, backup_type="page") - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") + self.pb.backup_node('node', node, backup_type="page") + self.assertEqual(self.pb.show('node')[1]['status'], "OK") self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE") + self.pb.show('node')[1]['backup-mode'], "PAGE") # Create tablespace table with node.connect("postgres") as con: -# con.connection.autocommit = True -# con.execute("CHECKPOINT") -# con.connection.autocommit = False + con.connection.autocommit = True + con.execute("CHECKPOINT") + con.connection.autocommit = False con.execute("CREATE TABLE tbl1 (a int) TABLESPACE tblspc") con.execute( "INSERT INTO tbl1 SELECT * " @@ -929,25 +772,20 @@ def test_restore_with_tablespace_mapping_2(self): con.commit() # Second page backup - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") - self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK") 
+ backup_id = self.pb.backup_node('node', node, backup_type="page") + self.assertEqual(self.pb.show('node')[2]['status'], "OK") self.assertEqual( - self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE") + self.pb.show('node')[2]['backup-mode'], "PAGE") node.stop() node.cleanup() tblspc_path_new = os.path.join(node.base_dir, "tblspc_new") - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ - "-T", "%s=%s" % (tblspc_path, tblspc_path_new)]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "-T", "%s=%s" % (tblspc_path, tblspc_path_new)]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() count = node.execute("postgres", "SELECT count(*) FROM tbl") @@ -958,14 +796,12 @@ def test_restore_with_tablespace_mapping_2(self): # @unittest.skip("skip") def test_restore_with_missing_or_corrupted_tablespace_map(self): """restore backup with missing or corrupted tablespace_map""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Create tablespace @@ -973,109 +809,60 @@ def test_restore_with_missing_or_corrupted_tablespace_map(self): node.pgbench_init(scale=1, tablespace='tblspace') # Full backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Change some data pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum']) pgbench.wait() # Page backup - page_id = self.backup_node(backup_dir, 'node', node, backup_type="page") + page_id = self.pb.backup_node('node', node, backup_type="page") pgdata = self.pgdata_content(node.data_dir) - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() olddir = self.get_tblspace_path(node, 'tblspace') newdir = self.get_tblspace_path(node2, 'tblspace') # drop tablespace_map - tablespace_map = os.path.join( - backup_dir, 'backups', 'node', - page_id, 'database', 'tablespace_map') - - tablespace_map_tmp = os.path.join( - backup_dir, 'backups', 'node', - page_id, 'database', 'tablespace_map_tmp') - - os.rename(tablespace_map, tablespace_map_tmp) - - try: - self.restore_node( - backup_dir, 'node', node2, - options=["-T", "{0}={1}".format(olddir, newdir)]) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Tablespace map is missing: "{0}", ' - 'probably backup {1} is corrupt, validate it'.format( - tablespace_map, page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node(backup_dir, 'node', node2) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is missing.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as 
e: - self.assertIn( - 'ERROR: Tablespace map is missing: "{0}", ' - 'probably backup {1} is corrupt, validate it'.format( - tablespace_map, page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - copyfile(tablespace_map_tmp, tablespace_map) - - with open(tablespace_map, "a") as f: - f.write("HELLO\n") - - try: - self.restore_node( - backup_dir, 'node', node2, - options=["-T", "{0}={1}".format(olddir, newdir)]) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is corupted.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node(backup_dir, 'node', node2) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is corupted.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # rename it back - os.rename(tablespace_map_tmp, tablespace_map) - - print(self.restore_node( - backup_dir, 'node', node2, + tablespace_map = self.read_backup_file(backup_dir, 'node', page_id, + 'database/tablespace_map', text=True) + self.remove_backup_file(backup_dir, 'node', page_id, 'database/tablespace_map') + + self.pb.restore_node('node', node=node2, + options=["-T", "{0}={1}".format(olddir, newdir)], + expect_error="because tablespace_map is missing") + self.assertMessage(regex= + rf'ERROR: Tablespace map is missing: "[^"]*{page_id}[^"]*tablespace_map", ' + rf'probably backup {page_id} is corrupt, validate it') + + self.pb.restore_node('node', node=node2, + expect_error="because tablespace_map is missing") + self.assertMessage(regex= + rf'ERROR: Tablespace map is missing: "[^"]*{page_id}[^"]*tablespace_map", ' + rf'probably backup {page_id} is corrupt, validate it') + + self.corrupt_backup_file(backup_dir, 'node', page_id, 'database/tablespace_map', + overwrite=tablespace_map + "HELLO\n", text=True) + + self.pb.restore_node('node', node=node2, + options=["-T", f"{olddir}={newdir}"], + expect_error="because tablespace_map is corrupted") + self.assertMessage(regex=r'ERROR: Invalid CRC of tablespace map file ' + rf'"[^"]*{page_id}[^"]*tablespace_map"') + + self.pb.restore_node('node', node=node2, + expect_error="because tablespace_map is corrupted") + self.assertMessage(regex=r'ERROR: Invalid CRC of tablespace map file ' + rf'"[^"]*{page_id}[^"]*tablespace_map"') + + # write the correct tablespace_map back + self.write_backup_file(backup_dir, 'node', page_id, 'database/tablespace_map', + tablespace_map, text=True) + + print(self.pb.restore_node('node', node2, options=["-T", "{0}={1}".format(olddir, newdir)])) pgdata_restored = self.pgdata_content(node2.data_dir) @@ -1087,39 +874,30 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): make node with archiving, make stream backup, make PITR to Recovery Time """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') -
self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) + backup_id = self.pb.backup_node('node', node, options=["--stream"]) node.safe_psql("postgres", "create table t_heap(a int)") node.stop() node.cleanup() - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] - - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, - options=[ - "-j", "4", '--time={0}'.format(recovery_time), - "--recovery-target-action=promote" - ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + recovery_time = self.pb.show('node', backup_id)['recovery-time'] + restore_result = self.pb.restore_node('node', node, + options=[ + "-j", "4", '--time={0}'.format(recovery_time), + "--recovery-target-action=promote" + ] + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() result = node.psql("postgres", 'select * from t_heap') @@ -1132,30 +910,25 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): make node with archiving, make stream backup, make PITR to Recovery Time """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) + backup_id = self.pb.backup_node('node', node, options=["--stream"]) node.safe_psql("postgres", "create table t_heap(a int)") node.stop() node.cleanup() - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] + recovery_time = self.pb.show('node', backup_id)['recovery-time'] self.assertIn( "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ "-j", "4", '--time={0}'.format(recovery_time), "--recovery-target-action=promote" @@ -1174,38 +947,30 @@ def test_archive_node_backup_stream_pitr(self): """ make node with archiving, make stream backup, create table t_heap, make pitr to Recovery Time, - check that t_heap do not exists + check that t_heap does not exist """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, 
options=["--stream"]) + backup_id = self.pb.backup_node('node', node, options=["--stream"]) node.safe_psql("postgres", "create table t_heap(a int)") node.cleanup() - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] + recovery_time = self.pb.show('node', backup_id)['recovery-time'] - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + restore_result = self.pb.restore_node('node', node, options=[ "-j", "4", '--time={0}'.format(recovery_time), "--recovery-target-action=promote" ] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) node.slow_start() @@ -1218,48 +983,40 @@ def test_archive_node_backup_archive_pitr_2(self): """ make node with archiving, make archive backup, create table t_heap, make pitr to Recovery Time, - check that t_heap do not exists + check that t_heap do not exist """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) if self.paranoia: pgdata = self.pgdata_content(node.data_dir) node.safe_psql("postgres", "create table t_heap(a int)") node.stop() - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] + recovery_time = self.pb.show('node', backup_id)['recovery-time'] - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node_restored, + resotre_result = self.pb.restore_node('node', node_restored, options=[ "-j", "4", '--time={0}'.format(recovery_time), "--recovery-target-action=promote"] - ), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + ) + self.assertMessage(resotre_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.set_auto_conf(node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() @@ -1274,17 +1031,15 @@ def test_archive_restore_to_restore_point(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + 
self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -1297,8 +1052,7 @@ def test_archive_restore_to_restore_point(self): "create table t_heap_1 as select generate_series(0,10000)") node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ "--recovery-target-name=savepoint", "--recovery-target-action=promote"]) @@ -1316,17 +1070,15 @@ def test_archive_restore_to_restore_point(self): @unittest.skip("skip") # @unittest.expectedFailure def test_zags_block_corrupt(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) conn = node.connect() with node.connect("postgres") as conn: @@ -1370,37 +1122,29 @@ def test_zags_block_corrupt(self): conn.execute( "insert into tbl select i from generate_series(0,100) as i") - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), - initdb_params=['--data-checksums']) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) - self.set_auto_conf( - node_restored, - {'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port}) + node_restored.set_auto_conf({'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port}) node_restored.slow_start() @unittest.skip("skip") # @unittest.expectedFailure def test_zags_block_corrupt_1(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={ 'full_page_writes': 'on'} ) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql('postgres', 'create table tbl(i int)') @@ -1444,20 +1188,15 @@ def test_zags_block_corrupt_1(self): self.switch_wal_segment(node) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), - initdb_params=['--data-checksums']) + node_restored = self.pg_node.make_simple('node_restored') pgdata = self.pgdata_content(node.data_dir) node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) - self.set_auto_conf( - node_restored, - {'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port}) + node_restored.set_auto_conf({'archive_mode': 'off', 'hot_standby': 'on', 'port': node_restored.port}) node_restored.slow_start() @@ -1475,7 +1214,7 @@ def test_zags_block_corrupt_1(self): # 
pg_xlogdump_path = self.get_bin_path('pg_xlogdump') -# pg_xlogdump = self.run_binary( +# pg_xlogdump = self.pb.run_binary( # [ # pg_xlogdump_path, '-b', # os.path.join(backup_dir, 'wal', 'node', '000000010000000000000003'), @@ -1495,259 +1234,209 @@ def test_restore_chain(self): ERROR delta backups, take valid delta backup, restore must be successfull """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node( - backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='delta', + options=['-U', 'wrong_name'], + expect_error=True) # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='delta', + options=['-U', 'wrong_name'], + expect_error=True) # Take DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='delta', + options=['-U', 'wrong_name'], + expect_error=True) self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[3]['status'], + self.pb.show('node')[3]['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[4]['status'], + self.pb.show('node')[4]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[5]['status'], + self.pb.show('node')[5]['status'], 'Backup STATUS should be "ERROR"') node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # @unittest.skip("skip") def test_restore_chain_with_corrupted_backup(self): """more complex test_restore_chain()""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - 
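# set_auto_conf() becomes a method of the node object in the hunks above
# (node_restored.set_auto_conf({...})) instead of a suite-level helper.
# A rough sketch of appending options to postgresql.auto.conf, assuming
# plain key = 'value' lines are enough for these tests; the real method may
# de-duplicate keys and quote more carefully:

import os


def set_auto_conf(data_dir, options):
    """Append settings to postgresql.auto.conf in the given data directory."""
    path = os.path.join(data_dir, 'postgresql.auto.conf')
    with open(path, 'a') as f:
        for name, value in options.items():
            # Single quotes inside values are doubled, as the GUC parser expects.
            escaped = str(value).replace("'", "''")
            f.write(f"{name} = '{escaped}'\n")

# usage: set_auto_conf(node_restored.data_dir,
#                      {'archive_mode': 'off', 'hot_standby': 'on',
#                       'port': node_restored.port})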
self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node( - backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='page', + options=['-U', 'wrong_name'], + expect_error=True) # Take 1 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='delta', + options=['-U', 'wrong_name'], + expect_error=True) # Take 2 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # Take ERROR DELTA - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='delta', options=['-U', 'wrong_name']) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='delta', + options=['-U', 'wrong_name'], + expect_error=True) # Take 3 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # Corrupted 4 DELTA - corrupt_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + corrupt_id = self.pb.backup_node('node', node, backup_type='delta') # ORPHAN 5 DELTA - restore_target_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + restore_target_id = self.pb.backup_node('node', node, backup_type='delta') # ORPHAN 6 DELTA - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # NEXT FULL BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='full') + self.pb.backup_node('node', node, backup_type='full') # Next Delta - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # do corrupt 6 DELTA backup - file = os.path.join( - backup_dir, 'backups', 'node', - corrupt_id, 'database', 'global', 'pg_control') - - file_new = os.path.join(backup_dir, 'pg_control') - os.rename(file, file_new) + self.remove_backup_file(backup_dir, 'node', corrupt_id, + 'database/global/pg_control') # RESTORE BACKUP node.cleanup() - try: - self.restore_node( - backup_dir, 'node', node, backup_id=restore_target_id) - self.assertEqual( - 1, 0, - "Expecting Error because restore backup is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} is orphan'.format(restore_target_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, backup_id=restore_target_id, + expect_error="because restore backup is corrupted") + self.assertMessage(contains=f'ERROR: Backup {restore_target_id} is orphan') self.assertEqual( 'OK', - 
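# The chain-restore tests check self.pb.show('node')[i]['status'] one index
# at a time over a dozen assertions.  The same check can be written as one
# list comparison; assert_backup_statuses() below is a helper invented for
# this sketch, not something the patch itself introduces:


def assert_backup_statuses(testcase, show_output, expected):
    """Compare the status column of `show` output against an expected list."""
    actual = [backup['status'] for backup in show_output]
    testcase.assertEqual(actual, expected)

# usage for the corrupted-chain scenario in this test:
# assert_backup_statuses(self, self.pb.show('node'),
#                        ['OK', 'OK', 'ERROR', 'OK', 'ERROR', 'OK', 'ERROR',
#                         'OK', 'CORRUPT', 'ORPHAN', 'ORPHAN', 'OK', 'OK'])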
self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[3]['status'], + self.pb.show('node')[3]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[4]['status'], + self.pb.show('node')[4]['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[5]['status'], + self.pb.show('node')[5]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[6]['status'], + self.pb.show('node')[6]['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[7]['status'], + self.pb.show('node')[7]['status'], 'Backup STATUS should be "OK"') # corruption victim self.assertEqual( 'CORRUPT', - self.show_pb(backup_dir, 'node')[8]['status'], + self.pb.show('node')[8]['status'], 'Backup STATUS should be "CORRUPT"') # orphaned child self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node')[9]['status'], + self.pb.show('node')[9]['status'], 'Backup STATUS should be "ORPHAN"') # orphaned child self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node')[10]['status'], + self.pb.show('node')[10]['status'], 'Backup STATUS should be "ORPHAN"') # next FULL self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[11]['status'], + self.pb.show('node')[11]['status'], 'Backup STATUS should be "OK"') # next DELTA self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[12]['status'], + self.pb.show('node')[12]['status'], 'Backup STATUS should be "OK"') node.cleanup() @@ -1759,37 +1448,34 @@ def test_restore_chain_with_corrupted_backup(self): @unittest.skip("skip") def test_restore_backup_from_future(self): """more complex test_restore_chain()""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + if not backup_dir.is_file_based: + self.skipTest("test uses 'rename' in backup directory") + + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=5) # pgbench = node.pgbench(options=['-T', '20', '-c', '2']) # pgbench.wait() # Take PAGE from future - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') - with open( - os.path.join( - backup_dir, 'backups', 'node', - backup_id, "backup.control"), "a") as conf: - conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() + timedelta(days=3))) + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data += "\nstart-time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() + timedelta(days=3)) # 
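# modify_backup_control() above replaces opening backup.control by hand to
# push start-time three days into the future.  A file-based sketch built on
# contextlib, assuming the helper simply exposes the file contents as .data
# and writes back whatever the test left there on a normal exit:

import os
from contextlib import contextmanager
from types import SimpleNamespace


@contextmanager
def modify_backup_control(backup_dir, instance, backup_id):
    """Yield backup.control contents for editing; save them when done."""
    path = os.path.join(str(backup_dir), 'backups', instance,
                        backup_id, 'backup.control')
    with open(path, 'r') as f:
        cf = SimpleNamespace(data=f.read())
    yield cf
    # Written back only when the with-block completes without an exception.
    with open(path, 'w') as f:
        f.write(cf.data)

# usage, as in the hunk above:
# with modify_backup_control(backup_dir, 'node', backup_id) as cf:
#     cf.data += "\nstart-time='2038-01-19 03:14:07'\n"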
rename directory - new_id = self.show_pb(backup_dir, 'node')[1]['id'] + new_id = self.pb.show('node')[1]['id'] os.rename( os.path.join(backup_dir, 'backups', 'node', backup_id), @@ -1798,11 +1484,11 @@ def test_restore_backup_from_future(self): pgbench = node.pgbench(options=['-T', '7', '-c', '1', '--no-vacuum']) pgbench.wait() - backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + self.pb.restore_node('node', node=node, backup_id=backup_id) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -1813,29 +1499,25 @@ def test_restore_target_immediate_stream(self): correct handling of immediate recovery target for STREAM backups """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # Take FULL - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) # Take delta - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) pgdata = self.pgdata_content(node.data_dir) # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') with open(recovery_conf, 'r') as f: print(f.read()) @@ -1844,8 +1526,7 @@ def test_restore_target_immediate_stream(self): # restore delta backup node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--immediate']) + self.pb.restore_node('node', node, options=['--immediate']) self.assertTrue( os.path.isfile(recovery_conf), @@ -1853,8 +1534,7 @@ def test_restore_target_immediate_stream(self): # restore delta backup node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--recovery-target=immediate']) + self.pb.restore_node('node', node, options=['--recovery-target=immediate']) self.assertTrue( os.path.isfile(recovery_conf), @@ -1866,30 +1546,26 @@ def test_restore_target_immediate_archive(self): correct handling of immediate recovery target for ARCHIVE backups """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node( - backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take delta - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, 
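# Version gating switches from get_version(node) to the cached
# self.pg_config_version compared against version_to_num('12.0').  A sketch
# of the numbering rule those comparisons rely on (major * 10000 + minor
# from PostgreSQL 10 on, as in server_version_num); the body below is an
# illustration, not the suite's own helper:


def version_to_num(version):
    """'12.0' -> 120000, '9.6.3' -> 90603, matching server_version_num."""
    parts = [int(p) for p in version.split('.')]
    if parts[0] >= 10:
        # Since PostgreSQL 10 there is no second "major" component.
        return parts[0] * 10000 + (parts[1] if len(parts) > 1 else 0)
    major, minor, patch = (parts + [0, 0, 0])[:3]
    return major * 10000 + minor * 100 + patch

# usage: if self.pg_config_version >= version_to_num('12.0'): ...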
backup_type='delta') pgdata = self.pgdata_content(node.data_dir) # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') with open(recovery_conf, 'r') as f: print(f.read()) @@ -1898,8 +1574,7 @@ def test_restore_target_immediate_archive(self): # restore page backup node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--immediate']) + self.pb.restore_node('node', node, options=['--immediate']) # For archive backup with immediate recovery target # recovery.conf is mandatory @@ -1908,42 +1583,41 @@ def test_restore_target_immediate_archive(self): # restore page backup node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--recovery-target=immediate']) + self.pb.restore_node('node', node, options=['--recovery-target=immediate']) # For archive backup with immediate recovery target # recovery.conf is mandatory with open(recovery_conf, 'r') as f: self.assertIn("recovery_target = 'immediate'", f.read()) - # @unittest.skip("skip") + # Skipped, because default recovery_target_timeline is 'current' + # Before PBCKP-598 the --recovery-target=latest' option did not work and this test allways passed + @unittest.skip("skip") def test_restore_target_latest_archive(self): """ make sure that recovery_target 'latest' is default recovery target """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') else: recovery_conf = os.path.join(node.data_dir, 'recovery.conf') # restore node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) # hash_1 = hashlib.md5( # open(recovery_conf, 'rb').read()).hexdigest() @@ -1960,7 +1634,7 @@ def test_restore_target_latest_archive(self): content_1 += line node.cleanup() - self.restore_node(backup_dir, 'node', node, options=['--recovery-target=latest']) + self.pb.restore_node('node', node=node, options=['--recovery-target=latest']) # hash_2 = hashlib.md5( # open(recovery_conf, 'rb').read()).hexdigest() @@ -1984,22 +1658,20 @@ def test_restore_target_new_options(self): check that new --recovery-target-* options are working correctly """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + 
self.pb.set_archiving('node', node) node.slow_start() # Take FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') with open(recovery_conf, 'r') as f: print(f.read()) @@ -2022,7 +1694,7 @@ def test_restore_target_new_options(self): target_name = 'savepoint' # in python-3.6+ it can be ...now()..astimezone()... - target_time = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M:%S %z") + target_time = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M:%S.%f%z") with node.connect("postgres") as con: res = con.execute( "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") @@ -2032,10 +1704,7 @@ def test_restore_target_new_options(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - if self.get_version(node) > self.version_to_num('10.0'): - res = con.execute("SELECT pg_current_wal_lsn()") - else: - res = con.execute("SELECT pg_current_xlog_location()") + res = con.execute("SELECT pg_current_wal_lsn()") con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") @@ -2046,8 +1715,7 @@ def test_restore_target_new_options(self): # Restore with recovery target time node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target-time={0}'.format(target_time), "--recovery-target-action=promote", @@ -2073,8 +1741,7 @@ def test_restore_target_new_options(self): # Restore with recovery target xid node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target-xid={0}'.format(target_xid), "--recovery-target-action=promote", @@ -2100,8 +1767,7 @@ def test_restore_target_new_options(self): # Restore with recovery target name node.cleanup() - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--recovery-target-name={0}'.format(target_name), "--recovery-target-action=promote", @@ -2126,33 +1792,31 @@ def test_restore_target_new_options(self): node.slow_start() # Restore with recovery target lsn - if self.get_version(node) >= 100000: - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-lsn={0}'.format(target_lsn), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) + node.cleanup() + self.pb.restore_node('node', node, + options=[ + '--recovery-target-lsn={0}'.format(target_lsn), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() - self.assertIn( - "recovery_target_lsn = '{0}'".format(target_lsn), - recovery_conf_content) + self.assertIn( + "recovery_target_lsn = '{0}'".format(target_lsn), + recovery_conf_content) - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) - node.slow_start() + node.slow_start() # @unittest.skip("skip") def test_smart_restore(self): @@ -2163,15 +1827,13 
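# target_time above is now formatted with "%Y-%m-%d %H:%M:%S.%f%z" instead
# of "%Y-%m-%d %H:%M:%S %z", so the value passed to --recovery-target-time
# keeps its microseconds.  A quick illustration of the two format strings
# (sample outputs assume a local offset of +03:00):

from datetime import datetime, timezone

now = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone()
print(now.strftime("%Y-%m-%d %H:%M:%S %z"))    # e.g. 2024-05-01 12:34:56 +0300
print(now.strftime("%Y-%m-%d %H:%M:%S.%f%z"))  # e.g. 2024-05-01 12:34:56.789012+0300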
@@ def test_smart_restore(self): copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # create database @@ -2180,7 +1842,7 @@ def test_smart_restore(self): "CREATE DATABASE testdb") # take FULL backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # drop database node.safe_psql( @@ -2188,29 +1850,24 @@ def test_smart_restore(self): "DROP DATABASE testdb") # take PAGE backup - page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id = self.pb.backup_node('node', node, backup_type='page') # restore PAGE backup node.cleanup() - self.restore_node( - backup_dir, 'node', node, backup_id=page_id, + self.pb.restore_node('node', node, backup_id=page_id, options=['--no-validate', '--log-level-file=VERBOSE']) - logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(logfile, 'r') as f: - logfile_content = f.read() + logfile_content = self.read_pb_log() # get delta between FULL and PAGE filelists - filelist_full = self.get_backup_filelist( - backup_dir, 'node', full_id) + filelist_full = self.get_backup_filelist(backup_dir, 'node', full_id) - filelist_page = self.get_backup_filelist( - backup_dir, 'node', page_id) + filelist_page = self.get_backup_filelist(backup_dir, 'node', page_id) filelist_diff = self.get_backup_filelist_diff( filelist_full, filelist_page) + self.assertTrue(filelist_diff, 'There should be deleted files') for file in filelist_diff: self.assertNotIn(file, logfile_content) @@ -2222,61 +1879,52 @@ def test_pg_11_group_access(self): if self.pg_config_version < self.version_to_num('11.0'): self.skipTest('You need PostgreSQL >= 11 for this test') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=[ - '--data-checksums', - '--allow-group-access']) + initdb_params=['--allow-group-access']) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # take FULL backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) # restore backup - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node( - backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) # compare pgdata permissions pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @needs_gdb def test_restore_concurrent_drop_table(self): """""" - 
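# @needs_gdb above replaces the explicit self._check_gdb_flag_or_skip_test()
# call at the top of gdb-driven tests.  A sketch of such a decorator,
# assuming the suite gates these tests on an environment flag; the
# PGPROBACKUP_GDB variable name is an assumption made for this sketch, not
# necessarily what the framework really checks:

import os
import functools
import unittest


def needs_gdb(test_method):
    """Skip the wrapped test unless gdb-based testing is switched on."""
    @functools.wraps(test_method)
    def wrapper(self, *args, **kwargs):
        if os.environ.get('PGPROBACKUP_GDB', '').upper() != 'ON':
            raise unittest.SkipTest('gdb tests are disabled')
        return test_method(self, *args, **kwargs)
    return wrapper

# usage:
# @needs_gdb
# def test_restore_concurrent_drop_table(self): ...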
self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=1) # FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['--stream', '--compress']) # DELTA backup - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='delta', + gdb = self.pb.backup_node('node', node, backup_type='delta', options=['--stream', '--compress', '--no-validate'], gdb=True) @@ -2292,14 +1940,12 @@ def test_restore_concurrent_drop_table(self): 'postgres', 'CHECKPOINT') - gdb.remove_all_breakpoints() gdb.continue_execution_until_exit() pgdata = self.pgdata_content(node.data_dir) node.cleanup() - self.restore_node( - backup_dir, 'node', node, options=['--no-validate']) + self.pb.restore_node('node', node, options=['--no-validate']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -2307,56 +1953,35 @@ def test_restore_concurrent_drop_table(self): # @unittest.skip("skip") def test_lost_non_data_file(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node('node', node, options=['--stream']) - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - os.remove(file) + self.remove_backup_file(backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf') node.cleanup() - try: - self.restore_node( - backup_dir, 'node', node, options=['--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because of non-data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'No such file or directory', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Backup files restoring failed', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node, options=['--no-validate'], + expect_error="because of non-data file dissapearance") + self.assertMessage(contains='No such file') + self.assertMessage(contains='ERROR: Backup files restoring failed') def test_partial_restore_exclude(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = 
self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 10, 1): @@ -2377,34 +2002,20 @@ def test_partial_restore_exclude(self): db_list[line['datname']] = line['oid'] # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgdata = self.pgdata_content(node.data_dir) # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1 = self.pg_node.make_simple('node_restored_1') node_restored_1.cleanup() - try: - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored_1, + options=["--db-include=db1", "--db-exclude=db2"], + expect_error="because of 'db-exclude' and 'db-include'") + self.assertMessage(contains="ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together") - self.restore_node( - backup_dir, 'node', node_restored_1) + self.pb.restore_node('node', node_restored_1) pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) self.compare_pgdata(pgdata, pgdata_restored_1) @@ -2418,12 +2029,10 @@ def test_partial_restore_exclude(self): self.truncate_every_file_in_dir(db5_path) pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - node_restored_2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) + node_restored_2 = self.pg_node.make_simple('node_restored_2') node_restored_2.cleanup() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_2, options=[ "--db-exclude=db1", "--db-exclude=db5"]) @@ -2431,7 +2040,7 @@ def test_partial_restore_exclude(self): pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) - self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) + node_restored_2.set_auto_conf({'port': node_restored_2.port}) node_restored_2.slow_start() @@ -2460,14 +2069,12 @@ def test_partial_restore_exclude(self): def test_partial_restore_exclude_tablespace(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() cat_version = node.get_control_data()["Catalog version number"] @@ -2504,18 +2111,16 @@ def test_partial_restore_exclude_tablespace(self): db_list[line['datname']] = line['oid'] # FULL backup - backup_id = self.backup_node(backup_dir, 
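# The partial-restore tests emulate an excluded database by calling
# truncate_every_file_in_dir() on its directory before comparing pgdata
# contents.  A plausible file-based reading of that helper (a guess; the
# real one may be non-recursive or filter by file type):

import os


def truncate_every_file_in_dir(path):
    """Cut every regular file under `path` down to zero bytes."""
    for root, _dirs, files in os.walk(path):
        for name in files:
            with open(os.path.join(root, name), 'r+b') as f:
                f.truncate(0)

# usage, as in the tests above:
# truncate_every_file_in_dir(os.path.join(node_restored_1.data_dir,
#                                          'base', db_list['db5']))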
'node', node) + backup_id = self.pb.backup_node('node', node) pgdata = self.pgdata_content(node.data_dir) # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1 = self.pg_node.make_simple('node_restored_1') node_restored_1.cleanup() node1_tablespace = self.get_tblspace_path(node_restored_1, 'somedata') - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_1, options=[ "-T", "{0}={1}".format( node_tablespace, node1_tablespace)]) @@ -2534,13 +2139,11 @@ def test_partial_restore_exclude_tablespace(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - node_restored_2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) + node_restored_2 = self.pg_node.make_simple('node_restored_2') node_restored_2.cleanup() node2_tablespace = self.get_tblspace_path(node_restored_2, 'somedata') - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_2, options=[ "--db-exclude=db1", "--db-exclude=db5", @@ -2550,7 +2153,7 @@ def test_partial_restore_exclude_tablespace(self): pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) - self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) + node_restored_2.set_auto_conf({'port': node_restored_2.port}) node_restored_2.slow_start() @@ -2580,14 +2183,12 @@ def test_partial_restore_exclude_tablespace(self): def test_partial_restore_include(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 10, 1): @@ -2608,34 +2209,20 @@ def test_partial_restore_include(self): db_list[line['datname']] = line['oid'] # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) pgdata = self.pgdata_content(node.data_dir) # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1 = self.pg_node.make_simple('node_restored_1') node_restored_1.cleanup() - try: - self.restore_node( - backup_dir, 'node', - node_restored_1, options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored_1, + options=["--db-include=db1", "--db-exclude=db2"], + expect_error="because of 'db-exclude' and 'db-include'") + self.assertMessage(contains="ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together") - self.restore_node( - backup_dir, 'node', node_restored_1) + self.pb.restore_node('node', node_restored_1) pgdata_restored_1 
= self.pgdata_content(node_restored_1.data_dir) self.compare_pgdata(pgdata, pgdata_restored_1) @@ -2651,12 +2238,10 @@ def test_partial_restore_include(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) - node_restored_2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) + node_restored_2 = self.pg_node.make_simple('node_restored_2') node_restored_2.cleanup() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_2, options=[ "--db-include=db1", "--db-include=db5", @@ -2665,7 +2250,7 @@ def test_partial_restore_include(self): pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir) self.compare_pgdata(pgdata_restored_1, pgdata_restored_2) - self.set_auto_conf(node_restored_2, {'port': node_restored_2.port}) + node_restored_2.set_auto_conf({'port': node_restored_2.port}) node_restored_2.slow_start() node_restored_2.safe_psql( @@ -2706,14 +2291,12 @@ def test_partial_restore_backward_compatibility_1(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) node.slow_start() @@ -2724,35 +2307,21 @@ def test_partial_restore_backward_compatibility_1(self): 'CREATE database db{0}'.format(i)) # FULL backup with old binary, without partial restore support - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, old_binary=True, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', - node_restored, options=[ - "--db-exclude=db5"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup do not support partial restore.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible".format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored, + options=["--db-exclude=db5"], + expect_error="because backup do not support partial restore") + self.assertMessage(contains=f"ERROR: Backup {backup_id} doesn't contain " + "a database_map, partial restore is impossible") - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -2774,13 +2343,12 @@ def test_partial_restore_backward_compatibility_1(self): line = json.loads(line) db_list[line['datname']] = line['oid'] - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) # 
get etalon node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) self.truncate_every_file_in_dir( os.path.join( node_restored.data_dir, 'base', db_list['db5'])) @@ -2790,12 +2358,10 @@ def test_partial_restore_backward_compatibility_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) # get new node - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1 = self.pg_node.make_simple('node_restored_1') node_restored_1.cleanup() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_1, options=[ "--db-exclude=db5", "--db-exclude=db14"]) @@ -2811,14 +2377,12 @@ def test_partial_restore_backward_compatibility_merge(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir, old_binary=True) - self.add_instance(backup_dir, 'node', node, old_binary=True) + self.pb.init(old_binary=True) + self.pb.add_instance('node', node, old_binary=True) node.slow_start() @@ -2829,35 +2393,21 @@ def test_partial_restore_backward_compatibility_merge(self): 'CREATE database db{0}'.format(i)) # FULL backup with old binary, without partial restore support - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, old_binary=True, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', - node_restored, options=[ - "--db-exclude=db5"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup do not support partial restore.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible.".format(backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node=node_restored, + options=["--db-exclude=db5"], + expect_error="because backup do not support partial restore") + self.assertMessage(contains=f"ERROR: Backup {backup_id} doesn't contain a database_map, " + "partial restore is impossible.") - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node=node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -2879,13 +2429,12 @@ def test_partial_restore_backward_compatibility_merge(self): line = json.loads(line) db_list[line['datname']] = line['oid'] - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) # get etalon node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) self.truncate_every_file_in_dir( os.path.join( 
node_restored.data_dir, 'base', db_list['db5'])) @@ -2895,15 +2444,13 @@ def test_partial_restore_backward_compatibility_merge(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) # get new node - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1 = self.pg_node.make_simple('node_restored_1') node_restored_1.cleanup() # merge - self.merge_backup(backup_dir, 'node', backup_id=backup_id) + self.pb.merge_backup('node', backup_id=backup_id) - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_1, options=[ "--db-exclude=db5", "--db-exclude=db14"]) @@ -2914,14 +2461,12 @@ def test_partial_restore_backward_compatibility_merge(self): def test_empty_and_mangled_database_map(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() @@ -2932,93 +2477,46 @@ def test_empty_and_mangled_database_map(self): 'CREATE database db{0}'.format(i)) # FULL backup with database_map - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node('node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - # truncate database_map - path = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'database_map') - with open(path, "w") as f: - f.close() + self.corrupt_backup_file(backup_dir, 'node', backup_id, + 'database/database_map', truncate=0) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-include=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has empty or mangled database_map, " - "partial restore is impossible".format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-exclude=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has empty or mangled database_map, " - "partial restore is impossible".format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - # mangle database_map - with open(path, "w") as f: - f.write("42") - f.close() - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-include=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Field "dbOid" is 
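# corrupt_backup_file() handles both ways the old code damaged database_map
# by hand: truncating it to zero bytes and overwriting it with junk.  A
# file-based sketch matching the keyword arguments used around this point
# (truncate=, overwrite=, text=); the real helper also has to cope with
# catalogs that are not file based:

import os


def corrupt_backup_file(backup_dir, instance, backup_id, rel_path,
                        *, truncate=None, overwrite=None, text=False):
    """Damage one backup file: shrink it and/or replace its contents."""
    path = os.path.join(str(backup_dir), 'backups', instance,
                        backup_id, *rel_path.split('/'))
    if truncate is not None:
        with open(path, 'r+b') as f:
            f.truncate(truncate)
    if overwrite is not None:
        with open(path, 'w' if text else 'wb') as f:
            f.write(overwrite)

# usage, as in the hunks just above and below:
# corrupt_backup_file(backup_dir, 'node', backup_id,
#                     'database/database_map', truncate=0)
# corrupt_backup_file(backup_dir, 'node', backup_id,
#                     'database/database_map', overwrite=b'42')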
not found in the line 42 of ' - 'the file backup_content.control', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-exclude=db1", '--no-validate']) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Field "dbOid" is not found in the line 42 of ' - 'the file backup_content.control', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node_restored, + options=["--db-include=db1", '--no-validate'], + expect_error="because database_map is empty") + self.assertMessage(contains=f"ERROR: Backup {backup_id} has empty or " + "mangled database_map, partial restore " + "is impossible") + + self.pb.restore_node('node', node_restored, + options=["--db-exclude=db1", '--no-validate'], + expect_error="because database_map is empty") + self.assertMessage(contains=f"ERROR: Backup {backup_id} has empty or " + "mangled database_map, partial restore " + "is impossible") + + self.corrupt_backup_file(backup_dir, 'node', backup_id, + 'database/database_map', overwrite=b'42') + + self.pb.restore_node('node', node_restored, + options=["--db-include=db1", '--no-validate'], + expect_error="because database_map is corrupted") + self.assertMessage(contains='ERROR: backup_content.control file has ' + 'invalid format in line 42') + + self.pb.restore_node('node', node_restored, + options=["--db-exclude=db1", '--no-validate'], + expect_error="because database_map is corrupted") + self.assertMessage(contains='ERROR: backup_content.control file has ' + 'invalid format in line 42') # check that simple restore is still possible - self.restore_node( - backup_dir, 'node', node_restored, options=['--no-validate']) + self.pb.restore_node('node', node_restored, options=['--no-validate']) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3026,15 +2524,13 @@ def test_empty_and_mangled_database_map(self): def test_missing_database_map(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) + ptrack_enable=self.ptrack) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() @@ -3048,81 +2544,8 @@ def test_missing_database_map(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL 
ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 && < 15 - elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: + # PG < 15 + if self.pg_config_version >= 100000 and self.pg_config_version < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -3207,7 +2630,7 @@ def test_missing_database_map(self): "GRANT USAGE ON SCHEMA ptrack TO backup; " "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( "backupdb", @@ -3215,53 +2638,29 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup without database_map - backup_id = self.backup_node( - backup_dir, 'node', node, datname='backupdb', + backup_id = self.pb.backup_node('node', node, datname='backupdb', options=['--stream', "-U", "backup", '--log-level-file=verbose']) pgdata = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() # backup has missing database_map and that is legal - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-exclude=db5", "--db-exclude=db9"]) - self.assertEqual( - 1, 0, - "Expecting Error because user do not have pg_database access.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible.".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.restore_node( - backup_dir, 'node', node_restored, - options=["--db-include=db1"]) - self.assertEqual( - 1, 0, - "Expecting Error because user do not have pg_database access.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} doesn't contain a database_map, " - "partial restore is impossible.".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.restore_node('node', node_restored, + options=["--db-exclude=db5", "--db-exclude=db9"], + expect_error="because user do not have pg_database access") + self.assertMessage(contains=f"ERROR: Backup {backup_id} doesn't contain a database_map, " + "partial restore is impossible.") + + self.pb.restore_node('node', node_restored, + options=["--db-include=db1"], + expect_error="because user do not have pg_database access") + self.assertMessage(contains=f"ERROR: Backup {backup_id} doesn't contain a database_map, " + "partial restore is impossible.") # check that simple restore is still possible - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -3280,20 +2679,18 @@ def test_stream_restore_command_option(self): as replica, check that PostgreSQL recovery uses restore_command to obtain WAL from 
archive. """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={'max_wal_size': '32MB'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_conf = os.path.join(node.data_dir, 'postgresql.auto.conf') with open(recovery_conf, 'r') as f: print(f.read()) @@ -3301,8 +2698,7 @@ def test_stream_restore_command_option(self): recovery_conf = os.path.join(node.data_dir, 'recovery.conf') # Take FULL - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=5) @@ -3314,10 +2710,9 @@ def test_stream_restore_command_option(self): node.cleanup() shutil.rmtree(os.path.join(node.logs_dir)) - restore_cmd = self.get_restore_command(backup_dir, 'node', node) + restore_cmd = self.get_restore_command(backup_dir, 'node') - self.restore_node( - backup_dir, 'node', node, + self.pb.restore_node('node', node, options=[ '--restore-command={0}'.format(restore_cmd)]) @@ -3325,7 +2720,7 @@ def test_stream_restore_command_option(self): os.path.isfile(recovery_conf), "File '{0}' do not exists".format(recovery_conf)) - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): recovery_signal = os.path.join(node.data_dir, 'recovery.signal') self.assertTrue( os.path.isfile(recovery_signal), @@ -3347,40 +2742,36 @@ def test_stream_restore_command_option(self): def test_restore_primary_conninfo(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # Take FULL - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=1) #primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() str_conninfo='host=192.168.1.50 port=5432 user=foo password=foopass' - self.restore_node( - backup_dir, 'node', replica, + self.pb.restore_node('node', replica, options=['-R', '--primary-conninfo={0}'.format(str_conninfo)]) - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): standby_signal = os.path.join(replica.data_dir, 'standby.signal') self.assertTrue( os.path.isfile(standby_signal), "File '{0}' do not exists".format(standby_signal)) # TODO update test - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= 
self.version_to_num('12.0'): recovery_conf = os.path.join(replica.data_dir, 'postgresql.auto.conf') with open(recovery_conf, 'r') as f: print(f.read()) @@ -3396,36 +2787,32 @@ def test_restore_primary_conninfo(self): def test_restore_primary_slot_info(self): """ """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # Take FULL - self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=1) - replica = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'replica')) + replica = self.pg_node.make_simple('replica') replica.cleanup() node.safe_psql( "SELECT pg_create_physical_replication_slot('master_slot')") - self.restore_node( - backup_dir, 'node', replica, + self.pb.restore_node('node', replica, options=['-R', '--primary-slot-name=master_slot']) - self.set_auto_conf(replica, {'port': replica.port}) - self.set_auto_conf(replica, {'hot_standby': 'on'}) + replica.set_auto_conf({'port': replica.port}) + replica.set_auto_conf({'hot_standby': 'on'}) - if self.get_version(node) >= self.version_to_num('12.0'): + if self.pg_config_version >= self.version_to_num('12.0'): standby_signal = os.path.join(replica.data_dir, 'standby.signal') self.assertTrue( os.path.isfile(standby_signal), @@ -3437,14 +2824,12 @@ def test_issue_249(self): """ https://github.com/postgrespro/pg_probackup/issues/249 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -3466,24 +2851,20 @@ def test_issue_249(self): 'select * from pgbench_accounts') # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( 'postgres', 'INSERT INTO pgbench_accounts SELECT * FROM t1') # restore FULL backup - node_restored_1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) + node_restored_1 = self.pg_node.make_simple('node_restored_1') node_restored_1.cleanup() - self.restore_node( - backup_dir, 'node', + self.pb.restore_node('node', node_restored_1, options=["--db-include=db1"]) - self.set_auto_conf( - node_restored_1, - {'port': node_restored_1.port, 'hot_standby': 'off'}) + node_restored_1.set_auto_conf({'port': node_restored_1.port, 'hot_standby': 'off'}) node_restored_1.slow_start() @@ -3513,18 +2894,16 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): if self.version_to_num(self.old_probackup_version) >= self.version_to_num('2.4.5'): self.assertTrue(False, 'You need pg_probackup < 2.4.5 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = 
self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) node.pgbench_init(scale=5) @@ -3541,8 +2920,7 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): node.cleanup() - self.restore_node( - backup_dir, 'node',node, + self.pb.restore_node('node',node, options=[ "--recovery-target-time={0}".format(time), "--recovery-target-action=promote"], @@ -3550,7 +2928,7 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): node.slow_start() - self.backup_node(backup_dir, 'node', node, old_binary=True) + self.pb.backup_node('node', node, old_binary=True) node.pgbench_init(scale=5) @@ -3560,8 +2938,7 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): node.cleanup() - self.restore_node( - backup_dir, 'node',node, + self.pb.restore_node('node',node, options=[ "--recovery-target-xid={0}".format(xid), "--recovery-target-action=promote"]) @@ -3578,29 +2955,26 @@ def test_drop_postgresql_auto_conf(self): if self.pg_config_version < self.version_to_num('12.0'): self.skipTest('You need PostgreSQL >= 12 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # drop postgresql.auto.conf auto_path = os.path.join(node.data_dir, "postgresql.auto.conf") os.remove(auto_path) - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') node.cleanup() - self.restore_node( - backup_dir, 'node',node, + self.pb.restore_node('node',node, options=[ "--recovery-target=latest", "--recovery-target-action=promote"]) @@ -3619,30 +2993,27 @@ def test_truncate_postgresql_auto_conf(self): if self.pg_config_version < self.version_to_num('12.0'): self.skipTest('You need PostgreSQL >= 12 for this test') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # truncate postgresql.auto.conf auto_path = os.path.join(node.data_dir, "postgresql.auto.conf") with open(auto_path, "w+") as f: f.truncate() - self.backup_node(backup_dir, 'node', node, 
backup_type='page') + self.pb.backup_node('node', node, backup_type='page') node.cleanup() - self.restore_node( - backup_dir, 'node',node, + self.pb.restore_node('node',node, options=[ "--recovery-target=latest", "--recovery-target-action=promote"]) @@ -3651,54 +3022,46 @@ def test_truncate_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) # @unittest.skip("skip") + @needs_gdb def test_concurrent_restore(self): """""" - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=1) # FULL backup - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=['--stream', '--compress']) pgbench = node.pgbench(options=['-T', '7', '-c', '1', '--no-vacuum']) pgbench.wait() # DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', options=['--stream', '--compress', '--no-validate']) pgdata1 = self.pgdata_content(node.data_dir) - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node.cleanup() node_restored.cleanup() - gdb = self.restore_node( - backup_dir, 'node', node, options=['--no-validate'], gdb=True) + gdb = self.pb.restore_node('node', node, options=['--no-validate'], gdb=True) gdb.set_breakpoint('restore_data_file') gdb.run_until_break() - self.restore_node( - backup_dir, 'node', node_restored, options=['--no-validate']) + self.pb.restore_node('node', node_restored, options=['--no-validate']) - gdb.remove_all_breakpoints() gdb.continue_execution_until_exit() pgdata2 = self.pgdata_content(node.data_dir) @@ -3707,44 +3070,241 @@ def test_concurrent_restore(self): self.compare_pgdata(pgdata1, pgdata2) self.compare_pgdata(pgdata2, pgdata3) - # skip this test until https://github.com/postgrespro/pg_probackup/pull/399 - @unittest.skip("skip") + + # @unittest.skip("skip") + def test_restore_with_waldir(self): + """recovery using tablespace-mapping option and page backup""" + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + with node.connect("postgres") as con: + con.execute( + "CREATE TABLE tbl AS SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # Full backup + backup_id = self.pb.backup_node('node', node) + + node.stop() + node.cleanup() + + # Create waldir + waldir_path = os.path.join(node.base_dir, "waldir") + os.makedirs(waldir_path) + + # Test recovery from latest + restore_result = self.pb.restore_node('node', node, + options=[ + "-X", "%s" % (waldir_path)]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + + # check pg_wal is symlink + if 
node.major_version >= 10: + wal_path=os.path.join(node.data_dir, "pg_wal") + else: + wal_path=os.path.join(node.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) + + def test_restore_with_sync(self): + """ + By default our tests use --no-sync to speed up. + This test runs full backup and then `restore' both with fsync enabled. + """ + node = self.pg_node.make_simple('node') + self.pb.init() + self.pb.add_instance('node', node) + node.slow_start() + + node.execute("postgres", "CREATE TABLE tbl AS SELECT i as id FROM generate_series(0,3) AS i") + + backup_id = self.pb.backup_node('node', node, options=["--stream", "-j", "10"], sync=True) + + node.stop() + node.cleanup() + + restore_result = self.pb.restore_node('node', node, options=["-j", "10"], sync=True) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + + def test_restore_target_time(self): + """ + Test that we can restore to the time which we list + as a recovery time for a backup. + """ + node = self.pg_node.make_simple('node') + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.safe_psql("postgres", "CREATE TABLE table_1(i int)") + node.safe_psql("postgres", "INSERT INTO table_1 values (42)") + node.safe_psql("postgres", "select pg_create_restore_point('savepoint');") + + backup_id = self.pb.backup_node('node', node) + node.safe_psql("postgres", "select txid_current();") + + node.cleanup() + + backup = self.pb.show('node', backup_id) + target_time = backup['recovery-time'] + + self.pb.restore_node('node', node, options=[f'--recovery-target-time={target_time}', + '--recovery-target-action=promote',]) + + node.slow_start() + + with node.connect("postgres") as con: + res = con.execute("SELECT * from table_1")[0][0] + self.assertEqual(42, res) + + def test_restore_after_failover(self): + """ + PITR: Check that we are able to restore to a correct timeline by replaying + the WALs even though the backup was made on a different timeline. + + Insert some data on master (D0). Take a backup. Failover to replica. + Insert some more data (D1). Record this moment as a PITR target. Insert some more data (D2). + Recover to PITR target. Make sure D1 exists, while D2 does not. + + JIRA: PBCKP-588 + """ + master = self.pg_node.make_simple('master', set_replication=True) + self.pb.init() + self.pb.add_instance('master', master) + # Streaming is not enough. 
We need full WALs to restore to a point in time later than the backup itself + self.pb.set_archiving('master', master) + master.slow_start() + + self.pb.backup_node('master', master, backup_type='full', options=['--stream']) + + replica = self.pg_node.make_simple('replica') + replica.cleanup() + + master.safe_psql("SELECT pg_create_physical_replication_slot('master_slot')") + + self.pb.restore_node( + 'master', replica, + options=['-R', '--primary-slot-name=master_slot']) + + replica.set_auto_conf({'port': replica.port}) + replica.set_auto_conf({'hot_standby': 'on'}) + + if self.pg_config_version >= self.version_to_num('12.0'): + standby_signal = os.path.join(replica.data_dir, 'standby.signal') + self.assertTrue( + os.path.isfile(standby_signal), + f"File '{standby_signal}' does not exist") + + replica.slow_start(replica=True) + with master.connect("postgres") as con: + master_timeline = con.execute("SELECT timeline_id FROM pg_control_checkpoint()")[0][0] + self.assertNotEqual(master_timeline, 0) + + # Now we have master<=>standby setup. + master.safe_psql("postgres", "CREATE TABLE t1 (a int, b int)") + master.safe_psql("postgres", "INSERT INTO t1 SELECT i/100, i/500 FROM generate_series(1,100000) s(i)") + + # Make a backup on timeline 1 with most of the data missing + self.pb.backup_node('master', master, backup_type='full', options=['--stream']) + + # For debugging purpose it was useful to have an incomplete commit in WAL. Might not be needed anymore + psql_path = testgres_utils.get_bin_path("psql") + os.spawnlp(os.P_NOWAIT, psql_path, psql_path, "-p", str(master.port), "-h", master.host, "-d", "postgres", + "-X", "-A", "-t", "-q", "-c", + "INSERT INTO t1 SELECT i/100, i/500 FROM generate_series(1,1000000) s(i)" + ) + + master.stop(["-m", "immediate"]) + sleep(1) + replica.promote() + + with replica.connect("postgres") as con: + replica_timeline = con.execute("SELECT min_recovery_end_timeline FROM pg_control_recovery()")[0][0] + self.assertNotEqual(master_timeline, replica_timeline) + + # Add some more on timeline 2 + replica.safe_psql("postgres", "CREATE TABLE t2 (a int, b int)") + replica.safe_psql("postgres", f"INSERT INTO t2 SELECT i/100, i/500 FROM generate_series(1,{MAGIC_COUNT}) s(i)") + + # Find out point-in-time where we would like to restore to + with replica.connect("postgres") as con: + restore_time = con.execute("SELECT now(), txid_current();")[0][0] + + # Break MAGIC_COUNT. An insert which should not be restored + replica.safe_psql("postgres", "INSERT INTO t2 SELECT i/100, i/500 FROM generate_series(1,100000) s(i)") + + replica.safe_psql("postgres", "SELECT pg_switch_wal();") + + # Final restore. We expect to find only the data up to {restore_time} and nothing else + node_restored = self.pg_node.make_simple("node_restored") + node_restored.cleanup() + self.pb.restore_node('master', node_restored, options=[ + '--no-validate', + f'--recovery-target-time={restore_time}', + f'--recovery-target-timeline={replica_timeline}', # As per ticket we do not parse WALs. 
User supplies timeline manually + '--recovery-target-action=promote', + '-j', '4', + ]) + node_restored.set_auto_conf({'port': node_restored.port}) + node_restored.slow_start() + + with node_restored.connect("postgres") as con: + nrows = con.execute("SELECT COUNT(*) from t2")[0][0] + self.assertEqual(MAGIC_COUNT, nrows) + + # @unittest.skip("skip") + @needs_gdb def test_restore_issue_313(self): """ Check that partially restored PostgreSQL instance cannot be started """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) node.cleanup() count = 0 - filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) for file in filelist: # count only nondata files - if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0: + if int(filelist[file]['is_datafile']) == 0 and \ + filelist[file]['kind'] != 'dir' and \ + file != 'database_map': count += 1 - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) - gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress']) + gdb = self.pb.restore_node('node', node, gdb=True, options=['--progress']) gdb.verbose = False gdb.set_breakpoint('restore_non_data_file') gdb.run_until_break() - gdb.continue_execution_until_break(count - 2) + gdb.continue_execution_until_break(count - 1) gdb.quit() # emulate the user or HA taking care of PG configuration @@ -3767,54 +3327,254 @@ def test_restore_issue_313(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + self.assertIn( + "postgres: could not find the database system", + f.read()) + + # @unittest.skip("skip") - def test_restore_with_waldir(self): - """recovery using tablespace-mapping option and page backup""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + def test_restore_to_latest_timeline(self): + """recovery to latest timeline""" + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() + node.pgbench_init(scale=2) - with node.connect("postgres") as con: - con.execute( - "CREATE TABLE tbl AS SELECT * " - "FROM generate_series(0,3) AS integer") - con.commit() - - # Full backup - backup_id = self.backup_node(backup_dir, 'node', node) + before1 = 
node.table_checksum("pgbench_branches") + backup_id = self.pb.backup_node('node', node) node.stop() node.cleanup() - # Create waldir - waldir_path = os.path.join(node.base_dir, "waldir") - os.makedirs(waldir_path) + restore_result = self.pb.restore_node('node', node, options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) - # Test recovery from latest - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id), - self.restore_node( - backup_dir, 'node', node, + node.slow_start() + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '2', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + before2 = node.table_checksum("pgbench_branches") + self.pb.backup_node('node', node) + + node.stop() + node.cleanup() + # restore from first backup + restore_result = self.pb.restore_node('node', node, options=[ - "-X", "%s" % (waldir_path)]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + "-j", "4", "--recovery-target-timeline=latest", "-i", backup_id] + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "latest") + # check recovery-target=latest option for compatibility with previous versions + node.cleanup() + restore_result = self.pb.restore_node('node', node, + options=[ + "-j", "4", "--recovery-target=latest", "-i", backup_id] + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "latest") + + # start postgres and promote wal files to latest timeline node.slow_start() - count = node.execute("postgres", "SELECT count(*) FROM tbl") - self.assertEqual(count[0][0], 4) + # check for the latest updates + after = node.table_checksum("pgbench_branches") + self.assertEqual(before2, after) - # check pg_wal is symlink - if node.major_version >= 10: - wal_path=os.path.join(node.data_dir, "pg_wal") - else: - wal_path=os.path.join(node.data_dir, "pg_xlog") + # checking recovery_target_timeline=current is the default option + if self.pg_config_version >= self.version_to_num('12.0'): + node.stop() + node.cleanup() - self.assertEqual(os.path.islink(wal_path), True) + # restore from first backup + restore_result = self.pb.restore_node('node', node, + options=[ + "-j", "4", "-i", backup_id] + ) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "current") + + # start postgres with current timeline + node.slow_start() + + # check for the current updates + after = node.table_checksum("pgbench_branches") + self.assertEqual(before1, after) + +################################################ +# dry-run +############################################### + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_basic_dry_run_restore(self): + 
"""recovery dry-run """ + node = self.pg_node.make_simple('node') + + # check external directory with dry_run + external_dir = os.path.join(self.test_path, 'somedirectory') + os.mkdir(external_dir) + + new_external_dir=os.path.join(self.test_path, "restored_external_dir") + # fill external directory with data + f = open(os.path.join(external_dir, "very_important_external_file"), 'x') + f.close() + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.pgbench_init(scale=2) + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + backup_id = self.pb.backup_node('node', node, options=["--external-dirs={0}".format(external_dir)]) + + node.stop() + + # check data absence + restore_dir = os.path.join(self.test_path, "restored_dir") + if fs_backup_class.is_file_based: #AccessPath check is always true on s3 + dir_mode = os.stat(self.test_path).st_mode + os.chmod(self.test_path, 0o500) + + # 1 - Test recovery from latest without permissions + error_message = self.pb.restore_node('node', restore_dir=restore_dir, + options=["-j", "4", + "--external-mapping={0}={1}".format(external_dir, new_external_dir), + "--dry-run"], expect_error ='because of changed permissions') + try: + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(self.test_path, dir_mode) + + instance_before = self.pgdata_content(self.backup_dir) + # 2 - Test recovery from latest + restore_result = self.pb.restore_node('node', restore_dir=restore_dir, + options=["-j", "4", + "--external-mapping={0}={1}".format(external_dir, new_external_dir), + "--dry-run"]) + + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed in dry-run mode".format(backup_id)) + + instance_after = self.pgdata_content(self.backup_dir) + pgdata_after = self.pgdata_content(restore_dir) + + self.compare_instance_dir( + instance_before, + instance_after + ) + + # check external directory absence + self.assertFalse(os.path.exists(new_external_dir)) + + self.assertFalse(os.path.exists(restore_dir)) + + + @unittest.skipUnless(fs_backup_class.is_file_based, "AccessPath check is always true on s3") + def test_basic_dry_run_incremental_restore(self): + """incremental recovery with system_id mismatch and --force flag in --dry-run mode""" + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + node.pgbench_init(scale=2) + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + backup_id = self.pb.backup_node('node', node) + + node.stop() + # check data absence + restore_dir = os.path.join(self.test_path, "restored_dir") + + # 1 - recovery from latest + restore_result = self.pb.restore_node('node', + restore_dir=restore_dir, + options=["-j", "4"]) + self.assertMessage(restore_result, contains="INFO: Restore of backup {0} completed.".format(backup_id)) + + # Make some changes + node.slow_start() + + node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pgbench.wait() + pgbench.stdout.close() + backup_id = self.pb.backup_node('node', node, options=["--stream", + "-b DELTA"]) + node.stop() + + pg_probackup_conf = os.path.join(self.backup_dir, "backups/node/pg_probackup.conf") + + # make system_id mismatch + with open(pg_probackup_conf, 'r') as file: + data = file.read() + + match = 
re.search(r'(system-identifier)( = )([0-9]+)(\n)', data) + if match: + data = data.replace(match.group(3), '1111111111111111111') + + with open(pg_probackup_conf, 'w') as file: + + file.write(data) + + instance_before = self.pgdata_content(self.backup_dir) + pgdata_before = self.pgdata_content(restore_dir) + if fs_backup_class.is_file_based: #AccessPath check is always true on s3 + # Access check suite if disk mounted as read_only + dir_mode = os.stat(restore_dir).st_mode + os.chmod(restore_dir, 0o500) + + # 2 - incremetal recovery from latest without permissions + try: + error_message = self.pb.restore_node('node', + restore_dir=restore_dir, + options=["-j", "4", + "--dry-run", + "--force", + "-I", "checksum"], expect_error='because of changed permissions') + self.assertMessage(error_message, contains='ERROR: Check permissions') + finally: + # Cleanup + os.chmod(restore_dir, dir_mode) + + self.pb.restore_node('node', + restore_dir=restore_dir, + options=["-j", "4", + "--dry-run", + "--force", + "-I", "checksum"]) + instance_after = self.pgdata_content(self.backup_dir) + pgdata_after = self.pgdata_content(restore_dir) + + self.compare_instance_dir( + instance_before, + instance_after + ) + self.compare_pgdata( + pgdata_before, + pgdata_after + ) + + node.stop() diff --git a/tests/retention_test.py b/tests/retention_test.py index 88432a00f..8a462624f 100644 --- a/tests/retention_test.py +++ b/tests/retention_test.py @@ -1,47 +1,45 @@ -import os import unittest from datetime import datetime, timedelta -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest +from .helpers.ptrack_helpers import fs_backup_class +from pg_probackup2.gdb import needs_gdb +from .helpers.data_helpers import tail_file from time import sleep -from distutils.dir_util import copy_tree +import os.path -class RetentionTest(ProbackupTest, unittest.TestCase): +class RetentionTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure def test_retention_redundancy_1(self): """purge backups using redundancy-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) + self.pb.set_config('node', options=['--retention-redundancy=1']) # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") # Make backups to be keeped - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) - output_before = self.show_archive(backup_dir, 'node', tli=1) + output_before = self.pb.show_archive('node', tli=1) # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--expired', 
'--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.pb.delete_expired('node', options=['--expired', '--wal']) + self.assertEqual(len(self.pb.show('node')), 2) - output_after = self.show_archive(backup_dir, 'node', tli=1) + output_after = self.pb.show_archive('node', tli=1) self.assertEqual( output_before['max-segno'], @@ -55,183 +53,241 @@ def test_retention_redundancy_1(self): min_wal = output_after['min-segno'] max_wal = output_after['max-segno'] - for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')): - if not wal_name.endswith(".backup"): - - if self.archive_compress: - wal_name = wal_name[-27:] - wal_name = wal_name[:-3] - else: - wal_name = wal_name[-24:] - - self.assertTrue(wal_name >= min_wal) - self.assertTrue(wal_name <= max_wal) + wals = self.get_instance_wal_list(backup_dir, 'node') + for wal_name in wals: + if self.archive_compress and wal_name.endswith(self.compress_suffix): + wal_name = wal_name[:-len(self.compress_suffix)] + self.assertGreaterEqual(wal_name, min_wal) + self.assertLessEqual(wal_name, max_wal) # @unittest.skip("skip") def test_retention_window_2(self): """purge backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - with open( - os.path.join( - backup_dir, - 'backups', - 'node', - "pg_probackup.conf"), "a") as conf: - conf.write("retention-redundancy = 1\n") - conf.write("retention-window = 1\n") + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "retention-redundancy = 1\n" + cf.data += "retention-window = 1\n" # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") # Make backup to be keeped - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - backups = os.path.join(backup_dir, 'backups', 'node') days_delta = 5 - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=days_delta))) - days_delta -= 1 + for backup_id in backup_dir.list_instance_backups('node'): + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=days_delta)) + days_delta -= 1 # Make backup to be keeped - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) # Purge backups - self.delete_expired(backup_dir, 'node', options=['--expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.pb.delete_expired('node', options=['--expired']) + self.assertEqual(len(self.pb.show('node')), 2) # @unittest.skip("skip") def 
test_retention_window_3(self): """purge all backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take second FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take third FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) + for backup in backup_dir.list_instance_backups('node'): + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--retention-window=1', '--expired']) + self.pb.delete_expired('node', options=['--retention-window=1', '--expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 0) + self.assertEqual(len(self.pb.show('node')), 0) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) # count wal files in ARCHIVE # @unittest.skip("skip") def test_retention_window_4(self): """purge all backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL BACKUPs - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - backup_id_2 = self.backup_node(backup_dir, 'node', node) + backup_id_2 = self.pb.backup_node('node', node) - backup_id_3 = self.backup_node(backup_dir, 'node', node) + backup_id_3 = self.pb.backup_node('node', node) - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) + for backup in backup_dir.list_instance_backups('node'): + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) - self.delete_pb(backup_dir, 'node', backup_id_2) - self.delete_pb(backup_dir, 'node', backup_id_3) + 
self.pb.delete('node', backup_id_2) + self.pb.delete('node', backup_id_3) # Purge backups - self.delete_expired( - backup_dir, 'node', + self.pb.delete_expired( + 'node', options=['--retention-window=1', '--expired', '--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 0) + self.assertEqual(len(self.pb.show('node')), 0) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) # count wal files in ARCHIVE - wals_dir = os.path.join(backup_dir, 'wal', 'node') - # n_wals = len(os.listdir(wals_dir)) + wals = self.get_instance_wal_list(backup_dir, 'node') + self.assertFalse(wals) + + @unittest.skipIf(not fs_backup_class.is_file_based, "Locks are not implemented in cloud") + @needs_gdb + def test_concurrent_retention_1(self): + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "retention-redundancy = 1\n" + cf.data += "retention-window = 2\n" + + # Fill with data + node.pgbench_init(scale=1) + + full_id = self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '20', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20', '-c', '2']) + pgbench.wait() + + prev_id = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20', '-c', '2']) + pgbench.wait() + + last_id = self.pb.backup_node('node', node, backup_type="page") + + days_delta = 4 + for backup_id in backup_dir.list_instance_backups('node'): + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=days_delta)) + days_delta -= 1 + + gdb = self.pb.backup_node('node', node, gdb=True, + options=['--merge-expired']) + gdb.set_breakpoint("merge_chain") + gdb.run_until_break() + + self.pb.backup_node('node', node, + options=['--merge-expired'], + expect_error="because of concurrent merge") + self.assertMessage(contains=f"ERROR: Cannot lock backup {full_id}") + + @needs_gdb + def test_concurrent_retention_2(self): + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + with self.modify_backup_config(backup_dir, 'node') as cf: + cf.data += "retention-redundancy = 1\n" + cf.data += "retention-window = 2\n" + + # Fill with data + node.pgbench_init(scale=1) + + full_id = self.pb.backup_node('node', node, backup_type="full") + + pgbench = node.pgbench(options=['-t', '20', '-c', '2']) + pgbench.wait() + + self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20', '-c', '2']) + pgbench.wait() + + prev_id = self.pb.backup_node('node', node, backup_type="page") + + pgbench = node.pgbench(options=['-t', '20', '-c', '2']) + pgbench.wait() + + last_id = self.pb.backup_node('node', node, backup_type="page") - # self.assertTrue(n_wals > 0) + days_delta = 4 + for backup_id in backup_dir.list_instance_backups('node'): + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=days_delta)) + days_delta -= 1 - # 
self.delete_expired( - # backup_dir, 'node', - # options=['--retention-window=1', '--expired', '--wal']) + gdb = self.pb.backup_node('node', node, gdb=True, + options=['--merge-expired']) + gdb.set_breakpoint("merge_files") + gdb.run_until_break() - # count again - n_wals = len(os.listdir(wals_dir)) - self.assertTrue(n_wals == 0) + out = self.pb.backup_node('node', node, + options=['--merge-expired'],return_id=False) + #expect_error="because of concurrent merge") + self.assertMessage(out, contains=f"WARNING: Backup {full_id} is not in stable state") + self.assertMessage(out, contains=f"There are no backups to merge by retention policy") # @unittest.skip("skip") def test_window_expire_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) + backup_id_b = self.pb.backup_node('node', node) # Change FULLb backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') @@ -240,8 +296,7 @@ def test_window_expire_interleaved_incremental_chains(self): # FULLa OK # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # PAGEa1 OK # FULLb ERROR @@ -258,8 +313,7 @@ def test_window_expire_interleaved_incremental_chains(self): # FULLb OK # FULLa ERROR - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -280,8 +334,7 @@ def test_window_expire_interleaved_incremental_chains(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # PAGEa2 OK # PAGEb1 ERROR @@ -303,8 +356,7 @@ def test_window_expire_interleaved_incremental_chains(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # Change PAGEa2 and FULla to OK self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') @@ -318,40 +370,35 @@ def test_window_expire_interleaved_incremental_chains(self): # FULLa OK # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']: - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.delete_expired( - backup_dir, 'node', + for backup in backup_dir.list_instance_backups('node'): + if backup in [page_id_a2, page_id_b2]: + continue + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += 
"\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) + + self.pb.delete_expired( + 'node', options=['--retention-window=1', '--expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 6) + self.assertEqual(len(self.pb.show('node')), 6) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) # @unittest.skip("skip") def test_redundancy_expire_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) + backup_id_b = self.pb.backup_node('node', node) # Change FULL B backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') @@ -359,8 +406,7 @@ def test_redundancy_expire_interleaved_incremental_chains(self): # FULLb ERROR # FULLa OK # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # PAGEa1 OK # FULLb ERROR @@ -377,8 +423,7 @@ def test_redundancy_expire_interleaved_incremental_chains(self): # FULLb OK # FULLa ERROR - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -398,8 +443,7 @@ def test_redundancy_expire_interleaved_incremental_chains(self): # PAGEa1 OK # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # PAGEa2 OK # PAGEb1 ERROR @@ -420,7 +464,7 @@ def test_redundancy_expire_interleaved_incremental_chains(self): # PAGEa1 OK # FULLb OK # FULLa ERROR - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Change PAGEa2 and FULLa status to OK self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') @@ -433,31 +477,28 @@ def test_redundancy_expire_interleaved_incremental_chains(self): # FULLb OK # FULLa OK - self.delete_expired( - backup_dir, 'node', + self.pb.delete_expired( + 'node', options=['--retention-redundancy=1', '--expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + self.assertEqual(len(self.pb.show('node')), 3) - print(self.show_pb( - backup_dir, 'node', as_json=False, as_text=True)) + print(self.pb.show('node', as_json=False, as_text=True)) # @unittest.skip("skip") def test_window_merge_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - 
self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) + backup_id_b = self.pb.backup_node('node', node) # Change FULLb backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') @@ -466,8 +507,7 @@ def test_window_merge_interleaved_incremental_chains(self): # FULLa OK # Take PAGEa1 backup - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # PAGEa1 OK # FULLb ERROR @@ -483,8 +523,7 @@ def test_window_merge_interleaved_incremental_chains(self): # FULLb OK # FULLa OK - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -504,8 +543,7 @@ def test_window_merge_interleaved_incremental_chains(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # PAGEa2 OK # PAGEb1 ERROR @@ -527,8 +565,7 @@ def test_window_merge_interleaved_incremental_chains(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # Change PAGEa2 and FULLa to OK self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK') @@ -542,17 +579,15 @@ def test_window_merge_interleaved_incremental_chains(self): # FULLa OK # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']: - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', + for backup in backup_dir.list_instance_backups('node'): + if backup in [page_id_a2, page_id_b2]: + continue + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) + + output = self.pb.delete_expired( + 'node', options=['--retention-window=1', '--expired', '--merge-expired']) self.assertIn( @@ -573,7 +608,7 @@ def test_window_merge_interleaved_incremental_chains(self): "Rename merged full backup {0} to {1}".format( backup_id_b, page_id_b2), output) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.assertEqual(len(self.pb.show('node')), 2) # @unittest.skip("skip") def test_window_merge_interleaved_incremental_chains_1(self): @@ -585,32 +620,29 @@ def test_window_merge_interleaved_incremental_chains_1(self): FULLb FULLa """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = 
self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=5) # Take FULL BACKUPs - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-t', '20', '-c', '1']) pgbench.wait() - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-t', '20', '-c', '1']) pgbench.wait() # Change FULL B backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') pgdata_a1 = self.pgdata_content(node.data_dir) @@ -629,20 +661,17 @@ def test_window_merge_interleaved_incremental_chains_1(self): # PAGEa1 ERROR # FULLb OK # FULLa OK - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-t', '20', '-c', '1']) pgbench.wait() - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-t', '20', '-c', '1']) pgbench.wait() - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b3 = self.pb.backup_node('node', node, backup_type='page') pgdata_b3 = self.pgdata_content(node.data_dir) pgbench = node.pgbench(options=['-t', '20', '-c', '1']) @@ -666,56 +695,52 @@ def test_window_merge_interleaved_incremental_chains_1(self): # FULLa OK # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_a1, page_id_b3, 'pg_probackup.conf']: + for backup_id in backup_dir.list_instance_backups('node'): + if backup_id in [page_id_a1, page_id_b3]: continue - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) - self.delete_expired( - backup_dir, 'node', + self.pb.delete_expired( + 'node', options=['--retention-window=1', '--expired', '--merge-expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.assertEqual(len(self.pb.show('node')), 2) self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['id'], + self.pb.show('node')[1]['id'], page_id_b3) self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['id'], + self.pb.show('node')[0]['id'], page_id_a1) self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], + self.pb.show('node')[1]['backup-mode'], 'FULL') self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['backup-mode'], + self.pb.show('node')[0]['backup-mode'], 'FULL') node.cleanup() # Data correctness of PAGEa3 - self.restore_node(backup_dir, 'node', node, backup_id=page_id_a1) + self.pb.restore_node('node', node, backup_id=page_id_a1) pgdata_restored_a1 = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata_a1, pgdata_restored_a1) node.cleanup() # Data correctness of PAGEb3 - self.restore_node(backup_dir, 'node', node, backup_id=page_id_b3) + self.pb.restore_node('node', node, backup_id=page_id_b3) pgdata_restored_b3 = 
self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata_b3, pgdata_restored_b3) - # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants(self): - """ + r""" PAGEb3 | PAGEa3 -----------------------------retention window @@ -726,32 +751,29 @@ def test_basic_window_merge_multiple_descendants(self): FULLb | FULLa """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.pb.backup_node('node', node) # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() # Change FULLb backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -766,8 +788,7 @@ def test_basic_window_merge_multiple_descendants(self): # FULLb OK # FULLa OK - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -789,8 +810,7 @@ def test_basic_window_merge_multiple_descendants(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -815,8 +835,7 @@ def test_basic_window_merge_multiple_descendants(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -843,8 +862,7 @@ def test_basic_window_merge_multiple_descendants(self): # FULLb ERROR # FULLa OK - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a3 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -864,8 +882,7 @@ def test_basic_window_merge_multiple_descendants(self): self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b3 = self.pb.backup_node('node', node, backup_type='page') # PAGEb3 OK # PAGEa3 ERROR @@ -892,34 +909,28 @@ def test_basic_window_merge_multiple_descendants(self): # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + self.pb.show('node', 
backup_id=page_id_a3)['parent-backup-id'], page_id_a1) self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a2)['parent-backup-id'], page_id_a1) # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']: + for backup in backup_dir.list_instance_backups('node'): + if backup in [page_id_a3, page_id_b3]: continue + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', + output = self.pb.delete_expired( + 'node', options=[ '--retention-window=1', '--delete-expired', '--merge-expired', '--log-level-console=log']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.assertEqual(len(self.pb.show('node')), 2) # Merging chain A self.assertIn( @@ -954,24 +965,24 @@ def test_basic_window_merge_multiple_descendants(self): "Delete: {0}".format(page_id_a2), output) self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['id'], + self.pb.show('node')[1]['id'], page_id_b3) self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['id'], + self.pb.show('node')[0]['id'], page_id_a3) self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], + self.pb.show('node')[1]['backup-mode'], 'FULL') self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['backup-mode'], + self.pb.show('node')[0]['backup-mode'], 'FULL') # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants_1(self): - """ + r""" PAGEb3 | PAGEa3 -----------------------------retention window @@ -982,32 +993,29 @@ def test_basic_window_merge_multiple_descendants_1(self): FULLb | FULLa """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) # Take FULL BACKUPs - backup_id_a = self.backup_node(backup_dir, 'node', node) + backup_id_a = self.pb.backup_node('node', node) # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() - backup_id_b = self.backup_node(backup_dir, 'node', node) + backup_id_b = self.pb.backup_node('node', node) # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() # Change FULLb backup status to ERROR self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - page_id_a1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a1 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -1022,8 +1030,7 @@ def test_basic_window_merge_multiple_descendants_1(self): # FULLb OK # FULLa OK - page_id_b1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b1 = self.pb.backup_node('node', node, 
backup_type='page') # PAGEb1 OK # PAGEa1 ERROR @@ -1045,8 +1052,7 @@ def test_basic_window_merge_multiple_descendants_1(self): # FULLb ERROR # FULLa OK - page_id_a2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a2 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -1071,8 +1077,7 @@ def test_basic_window_merge_multiple_descendants_1(self): # FULLb OK # FULLa ERROR - page_id_b2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b2 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -1099,8 +1104,7 @@ def test_basic_window_merge_multiple_descendants_1(self): # FULLb ERROR # FULLa OK - page_id_a3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_a3 = self.pb.backup_node('node', node, backup_type='page') # pgbench = node.pgbench(options=['-T', '10', '-c', '2']) # pgbench.wait() @@ -1120,8 +1124,7 @@ def test_basic_window_merge_multiple_descendants_1(self): self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK') self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK') - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + page_id_b3 = self.pb.backup_node('node', node, backup_type='page') # PAGEb3 OK # PAGEa3 ERROR @@ -1148,34 +1151,28 @@ def test_basic_window_merge_multiple_descendants_1(self): # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1 self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a3)['parent-backup-id'], page_id_a1) self.assertEqual( - self.show_pb( - backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'], + self.pb.show('node', backup_id=page_id_a2)['parent-backup-id'], page_id_a1) # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']: + for backup in backup_dir.list_instance_backups('node'): + if backup in [page_id_a3, page_id_b3]: continue + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', + output = self.pb.delete_expired( + 'node', options=[ '--retention-window=1', '--merge-expired', '--log-level-console=log']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + self.assertEqual(len(self.pb.show('node')), 3) # Merging chain A self.assertIn( @@ -1202,36 +1199,35 @@ def test_basic_window_merge_multiple_descendants_1(self): backup_id_b, page_id_b3), output) self.assertEqual( - self.show_pb(backup_dir, 'node')[2]['id'], + self.pb.show('node')[2]['id'], page_id_b3) self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['id'], + self.pb.show('node')[1]['id'], page_id_a3) self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['id'], + self.pb.show('node')[0]['id'], page_id_a2) self.assertEqual( - self.show_pb(backup_dir, 'node')[2]['backup-mode'], + self.pb.show('node')[2]['backup-mode'], 'FULL') self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['backup-mode'], + 
self.pb.show('node')[1]['backup-mode'], 'FULL') self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['backup-mode'], + self.pb.show('node')[0]['backup-mode'], 'PAGE') - output = self.delete_expired( - backup_dir, 'node', + output = self.pb.delete_expired( + 'node', options=[ '--retention-window=1', '--delete-expired', '--log-level-console=log']) - # @unittest.skip("skip") def test_window_chains(self): """ PAGE @@ -1243,77 +1239,65 @@ def test_window_chains(self): PAGE FULL """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) # Chain A - self.backup_node(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Chain B - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + page_id_b3 = self.pb.backup_node('node', node, backup_type='delta') pgdata = self.pgdata_content(node.data_dir) # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_b3, 'pg_probackup.conf']: + for backup in backup_dir.list_instance_backups('node'): + if backup in [page_id_b3]: continue + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.delete_expired( - backup_dir, 'node', + self.pb.delete_expired( + 'node', options=[ '--retention-window=1', '--expired', '--merge-expired', '--log-level-console=log']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) + self.assertEqual(len(self.pb.show('node')), 1) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # @unittest.skip("skip") def test_window_chains_1(self): """ PAGE @@ -1325,59 +1309,48 @@ def test_window_chains_1(self): PAGE FULL """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) # Chain A - self.backup_node(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Chain B - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - page_id_b3 = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + page_id_b3 = self.pb.backup_node('node', node, backup_type='delta') self.pgdata_content(node.data_dir) # Purge backups - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup in [page_id_b3, 'pg_probackup.conf']: + for backup in backup_dir.list_instance_backups('node'): + if backup in [page_id_b3]: continue + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) - with open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - output = self.delete_expired( - backup_dir, 'node', + output = self.pb.delete_expired( + 'node', options=[ '--retention-window=1', '--merge-expired', '--log-level-console=log']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) self.assertIn( "There are no backups to delete by retention policy", @@ -1387,13 +1360,13 @@ def test_window_chains_1(self): "Retention merging finished", output) - output = self.delete_expired( - backup_dir, 'node', + output = self.pb.delete_expired( + 'node', options=[ '--retention-window=1', '--expired', '--log-level-console=log']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 1) + self.assertEqual(len(self.pb.show('node')), 1) self.assertIn( "There are no backups to merge by retention policy", @@ -1415,28 +1388,25 @@ def test_window_error_backups(self): FULL -------redundancy """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUPs - self.backup_node(backup_dir, 'node', node) - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') - self.backup_node( - backup_dir, 'node', node, backup_type='page') + 
self.pb.backup_node('node', node, backup_type='page') # Change FULLb backup status to ERROR # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') # @unittest.skip("skip") + @needs_gdb def test_window_error_backups_1(self): """ DELTA @@ -1444,45 +1414,40 @@ def test_window_error_backups_1(self): FULL -------window """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take PAGE BACKUP - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='page', gdb=True) + gdb = self.pb.backup_node('node', node, backup_type='page', gdb=True) # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') + gdb.signal('SIGINT') gdb.continue_execution_until_error() - self.show_pb(backup_dir, 'node')[1]['id'] + self.pb.show('node')[1]['id'] # Take DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', options=['--retention-window=2', '--delete-expired']) # Take FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) # @unittest.skip("skip") + @needs_gdb def test_window_error_backups_2(self): """ DELTA @@ -1490,281 +1455,220 @@ def test_window_error_backups_2(self): FULL -------window """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Take FULL BACKUP - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Take PAGE BACKUP - gdb = self.backup_node( - backup_dir, 'node', node, backup_type='page', gdb=True) + gdb = self.pb.backup_node('node', node, backup_type='page', gdb=True) # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') gdb.continue_execution_until_error() - self.show_pb(backup_dir, 'node')[1]['id'] - - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'SELECT pg_catalog.pg_stop_backup()') + self.pb.show('node')[1]['id'] # Take DELTA backup - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', options=['--retention-window=2', '--delete-expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + self.assertEqual(len(self.pb.show('node')), 3) + @needs_gdb def test_retention_redundancy_overlapping_chains(self): """""" - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - if self.get_version(node) < 90600: - self.skipTest('Skipped because ptrack support is disabled') - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) + self.pb.set_config('node', options=['--retention-redundancy=1']) # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") # Make backups to be keeped - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_files') gdb.run_until_break() sleep(1) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") - gdb.remove_all_breakpoints() gdb.continue_execution_until_exit() - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--expired', '--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.pb.delete_expired( + 'node', options=['--expired', '--wal']) + self.assertEqual(len(self.pb.show('node')), 2) - self.validate_pb(backup_dir, 'node') + self.pb.validate('node') + @needs_gdb def test_retention_redundancy_overlapping_chains_1(self): """""" - self._check_gdb_flag_or_skip_test() - - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.skipTest('Skipped because ptrack support is disabled') + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) + self.pb.set_config('node', 
options=['--retention-redundancy=1']) # Make backups to be purged - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") # Make backups to be keeped - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_files') gdb.run_until_break() sleep(1) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") - gdb.remove_all_breakpoints() gdb.continue_execution_until_exit() - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node, backup_type="page") # Purge backups - self.delete_expired( - backup_dir, 'node', options=['--expired', '--wal']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.pb.delete_expired( + 'node', options=['--expired', '--wal']) + self.assertEqual(len(self.pb.show('node')), 2) - self.validate_pb(backup_dir, 'node') + self.pb.validate('node') def test_wal_purge_victim(self): """ https://github.com/postgrespro/pg_probackup/issues/103 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # Make ERROR incremental backup - try: - self.backup_node(backup_dir, 'node', node, backup_type='page') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Valid full backup on current timeline 1 is not found" in e.message and - "ERROR: Create new full backup before an incremental one" in e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - page_id = self.show_pb(backup_dir, 'node')[0]['id'] + self.pb.backup_node('node', node, backup_type='page', + expect_error="because page backup should not be " + "possible without valid full backup") + self.assertMessage(contains="WARNING: Valid full backup on current timeline 1 is not found") + self.assertMessage(contains="ERROR: Create new full backup before an incremental one") + + page_id = self.pb.show('node')[0]['id'] sleep(1) # Make FULL backup - full_id = self.backup_node(backup_dir, 'node', node, options=['--delete-wal']) - - try: - self.validate_pb(backup_dir, 'node') - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "INFO: Backup {0} WAL segments are valid".format(full_id), - e.message) - self.assertIn( - "WARNING: Backup {0} has missing parent 0".format(page_id), - e.message) + full_id = self.pb.backup_node('node', node, options=['--delete-wal']) + + 
self.pb.validate('node', + expect_error="because page backup should not be " + "possible without valid full backup") + self.assertMessage(contains=f"INFO: Backup {full_id} WAL segments are valid") + self.assertMessage(contains=f"WARNING: Backup {page_id} has missing parent 0") # @unittest.skip("skip") + @needs_gdb def test_failed_merge_redundancy_retention(self): """ Check that retention purge works correctly with MERGING backups """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join( - self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL1 backup - full_id = self.backup_node(backup_dir, 'node', node) + full_id = self.pb.backup_node('node', node) # DELTA BACKUP - delta_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + delta_id = self.pb.backup_node('node', node, backup_type='delta') # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # FULL2 backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # FULL3 backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') # DELTA BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=2']) + self.pb.set_config('node', options=['--retention-redundancy=2']) - self.set_config( - backup_dir, 'node', options=['--retention-window=2']) + self.pb.set_config('node', options=['--retention-window=2']) # create pair of MERGING backup as a result of failed merge - gdb = self.merge_backup( - backup_dir, 'node', delta_id, gdb=True) + gdb = self.pb.merge_backup('node', delta_id, gdb=True) gdb.set_breakpoint('backup_non_data_file') gdb.run_until_break() gdb.continue_execution_until_break(2) - gdb._execute('signal SIGKILL') + gdb.signal('SIGKILL') # "expire" first full backup - backups = os.path.join(backup_dir, 'backups', 'node') - with open( - os.path.join( - backups, full_id, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) + with self.modify_backup_control(backup_dir, 'node', full_id) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) # run retention merge - self.delete_expired( - backup_dir, 'node', options=['--delete-expired']) + 
self.pb.delete_expired( + 'node', options=['--delete-expired']) self.assertEqual( 'MERGING', - self.show_pb(backup_dir, 'node', full_id)['status'], + self.pb.show('node', full_id)['status'], 'Backup STATUS should be "MERGING"') self.assertEqual( 'MERGING', - self.show_pb(backup_dir, 'node', delta_id)['status'], + self.pb.show('node', delta_id)['status'], 'Backup STATUS should be "MERGING"') - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 10) + self.assertEqual(len(self.pb.show('node')), 10) def test_wal_depth_1(self): """ @@ -1774,31 +1678,28 @@ def test_wal_depth_1(self): wal-depth=2 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + self.pb.set_config('node', options=['--archive-timeout=60s']) node.slow_start() # FULL node.pgbench_init(scale=1) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # PAGE node.pgbench_init(scale=1) - B2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + B2 = self.pb.backup_node('node', node, backup_type='page') # generate_some more data node.pgbench_init(scale=1) @@ -1809,22 +1710,18 @@ def test_wal_depth_1(self): node.pgbench_init(scale=1) - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') node.pgbench_init(scale=1) - self.backup_node( - backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Timeline 2 - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - output = self.restore_node( - backup_dir, 'node', node_restored, + output = self.pb.restore_node('node', node_restored, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-action=promote']) @@ -1833,7 +1730,7 @@ def test_wal_depth_1(self): 'Restore of backup {0} completed'.format(B2), output) - self.set_auto_conf(node_restored, options={'port': node_restored.port}) + node_restored.set_auto_conf(options={'port': node_restored.port}) node_restored.slow_start() @@ -1848,8 +1745,7 @@ def test_wal_depth_1(self): # Timeline 3 node_restored.cleanup() - output = self.restore_node( - backup_dir, 'node', node_restored, + output = self.pb.restore_node('node', node_restored, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=2', @@ -1859,24 +1755,23 @@ def test_wal_depth_1(self): 'Restore of backup {0} completed'.format(B2), output) - self.set_auto_conf(node_restored, options={'port': node_restored.port}) + node_restored.set_auto_conf(options={'port': node_restored.port}) node_restored.slow_start() node_restored.pgbench_init(scale=1) - self.backup_node( - backup_dir, 'node', node_restored, data_dir=node_restored.data_dir) + self.pb.backup_node('node', node_restored, data_dir=node_restored.data_dir) node.pgbench_init(scale=1) - 
self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - lsn = self.show_archive(backup_dir, 'node', tli=2)['switchpoint'] + lsn = self.pb.show_archive('node', tli=2)['switchpoint'] - self.validate_pb( - backup_dir, 'node', backup_id=B2, + self.pb.validate( + 'node', backup_id=B2, options=['--recovery-target-lsn={0}'.format(lsn)]) - self.validate_pb(backup_dir, 'node') + self.pb.validate('node') def test_wal_purge(self): """ @@ -1898,28 +1793,25 @@ def test_wal_purge(self): wal-depth=2 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_config('node', options=['--archive-timeout=60s']) node.slow_start() # STREAM FULL - stream_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + stream_id = self.pb.backup_node('node', node, options=['--stream']) node.stop() - self.set_archiving(backup_dir, 'node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - B1 = self.backup_node(backup_dir, 'node', node) + B1 = self.pb.backup_node('node', node) node.pgbench_init(scale=1) target_xid = node.safe_psql( @@ -1928,20 +1820,18 @@ def test_wal_purge(self): node.pgbench_init(scale=5) # B2 FULL on TLI1 - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=4) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=4) - self.delete_pb(backup_dir, 'node', options=['--delete-wal']) + self.pb.delete('node', options=['--delete-wal']) # TLI 2 - node_tli2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) + node_tli2 = self.pg_node.make_simple('node_tli2') node_tli2.cleanup() - output = self.restore_node( - backup_dir, 'node', node_tli2, + output = self.pb.restore_node('node', node_tli2, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=1', @@ -1951,7 +1841,7 @@ def test_wal_purge(self): 'INFO: Restore of backup {0} completed'.format(B1), output) - self.set_auto_conf(node_tli2, options={'port': node_tli2.port}) + node_tli2.set_auto_conf(options={'port': node_tli2.port}) node_tli2.slow_start() node_tli2.pgbench_init(scale=4) @@ -1960,23 +1850,19 @@ def test_wal_purge(self): "select txid_current()").decode('utf-8').rstrip() node_tli2.pgbench_init(scale=1) - self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + self.pb.backup_node('node', node_tli2, data_dir=node_tli2.data_dir) node_tli2.pgbench_init(scale=3) - self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + self.pb.backup_node('node', node_tli2, data_dir=node_tli2.data_dir) node_tli2.pgbench_init(scale=1) node_tli2.cleanup() # TLI3 - node_tli3 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) + node_tli3 = self.pg_node.make_simple('node_tli3') node_tli3.cleanup() # Note, that successful validation here is a happy coincidence - output = self.restore_node( - backup_dir, 'node', node_tli3, + output = self.pb.restore_node('node', 
node_tli3, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=2', @@ -1985,56 +1871,57 @@ def test_wal_purge(self): self.assertIn( 'INFO: Restore of backup {0} completed'.format(B1), output) - self.set_auto_conf(node_tli3, options={'port': node_tli3.port}) + node_tli3.set_auto_conf(options={'port': node_tli3.port}) node_tli3.slow_start() node_tli3.pgbench_init(scale=5) node_tli3.cleanup() # TLI4 - node_tli4 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) + node_tli4 = self.pg_node.make_simple('node_tli4') node_tli4.cleanup() - self.restore_node( - backup_dir, 'node', node_tli4, backup_id=stream_id, + self.pb.restore_node('node', node_tli4, backup_id=stream_id, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) - self.set_auto_conf(node_tli4, options={'port': node_tli4.port}) - self.set_archiving(backup_dir, 'node', node_tli4) + node_tli4.set_auto_conf(options={'port': node_tli4.port}) + self.pb.set_archiving('node', node_tli4) node_tli4.slow_start() node_tli4.pgbench_init(scale=5) - self.backup_node( - backup_dir, 'node', node_tli4, data_dir=node_tli4.data_dir) + self.pb.backup_node('node', node_tli4, data_dir=node_tli4.data_dir) node_tli4.pgbench_init(scale=5) node_tli4.cleanup() # TLI5 - node_tli5 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) + node_tli5 = self.pg_node.make_simple('node_tli5') node_tli5.cleanup() - self.restore_node( - backup_dir, 'node', node_tli5, backup_id=stream_id, + self.pb.restore_node('node', node_tli5, backup_id=stream_id, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) - self.set_auto_conf(node_tli5, options={'port': node_tli5.port}) - self.set_archiving(backup_dir, 'node', node_tli5) + node_tli5.set_auto_conf(options={'port': node_tli5.port}) + self.pb.set_archiving('node', node_tli5) node_tli5.slow_start() node_tli5.pgbench_init(scale=10) # delete '.history' file of TLI4 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000004.history')) + self.remove_instance_wal(backup_dir, 'node', '00000004.history') # delete '.history' file of TLI5 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000005.history')) + self.wait_instance_wal_exists(backup_dir, 'node', '00000005.history') + self.remove_instance_wal(backup_dir, 'node', '00000005.history') + + tailer = tail_file(os.path.join(node_tli5.logs_dir, 'postgresql.log')) + tailer.wait(contains='LOG: pushing file "000000050000000000000007') + tailer.wait_archive_push_completed() + del tailer + node_tli5.stop() - output = self.delete_pb( - backup_dir, 'node', + output = self.pb.delete('node', options=[ '--delete-wal', '--dry-run', '--log-level-console=verbose']) @@ -2048,11 +1935,11 @@ def test_wal_purge(self): 'INFO: On timeline 5 all files can be removed', output) - show_tli1_before = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_before = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_before = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_before = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_before = self.show_archive(backup_dir, 'node', tli=5) + show_tli1_before = self.pb.show_archive('node', tli=1) + show_tli2_before = self.pb.show_archive('node', tli=2) + show_tli3_before = self.pb.show_archive('node', tli=3) + show_tli4_before = self.pb.show_archive('node', tli=4) + show_tli5_before = self.pb.show_archive('node', tli=5) self.assertTrue(show_tli1_before) 
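The tail_file wait added just above (before node_tli5.stop()) closes a race: otherwise the WAL purge could run while the archiver is still pushing the last segment of timeline 5. The helper itself lives in tests/helpers/data_helpers.py and is not part of this diff (it also provides the wait_archive_push_completed() call used here, which is not reproduced below). A minimal sketch of the waiting behaviour the test relies on, with hypothetical names and timeouts, might look like this:

    # Hedged sketch only -- the real tail_file helper is defined in
    # tests/helpers/data_helpers.py and is not shown in this diff.
    # It illustrates the assumed contract: block until a given string
    # appears in postgresql.log, so the purge only runs after the
    # archiver has finished pushing the segment.
    import time

    class LogTailer:
        def __init__(self, path, timeout=60, poll=0.5):
            self.path = path
            self.timeout = timeout
            self.poll = poll

        def wait(self, contains):
            # re-read the log until the marker shows up or we time out
            deadline = time.monotonic() + self.timeout
            while time.monotonic() < deadline:
                with open(self.path, errors='replace') as f:
                    if contains in f.read():
                        return
                time.sleep(self.poll)
            raise TimeoutError(f'"{contains}" not found in {self.path}')

    # usage mirroring the test above (illustrative only):
    # tailer = LogTailer(os.path.join(node_tli5.logs_dir, 'postgresql.log'))
    # tailer.wait(contains='LOG: pushing file "000000050000000000000007')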
self.assertTrue(show_tli2_before) @@ -2060,8 +1947,7 @@ def test_wal_purge(self): self.assertTrue(show_tli4_before) self.assertTrue(show_tli5_before) - output = self.delete_pb( - backup_dir, 'node', + output = self.pb.delete('node', options=['--delete-wal', '--log-level-console=verbose']) self.assertIn( @@ -2073,11 +1959,11 @@ def test_wal_purge(self): 'INFO: On timeline 5 all files will be removed', output) - show_tli1_after = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_after = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_after = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_after = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_after = self.show_archive(backup_dir, 'node', tli=5) + show_tli1_after = self.pb.show_archive('node', tli=1) + show_tli2_after = self.pb.show_archive('node', tli=2) + show_tli3_after = self.pb.show_archive('node', tli=3) + show_tli4_after = self.pb.show_archive('node', tli=4) + show_tli5_after = self.pb.show_archive('node', tli=5) self.assertEqual(show_tli1_before, show_tli1_after) self.assertEqual(show_tli2_before, show_tli2_after) @@ -2095,7 +1981,7 @@ def test_wal_purge(self): self.assertFalse(show_tli5_after) - self.validate_pb(backup_dir, 'node') + self.pb.validate('node') def test_wal_depth_2(self): """ @@ -2118,28 +2004,25 @@ def test_wal_depth_2(self): wal-depth=2 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_config('node', options=['--archive-timeout=60s']) node.slow_start() # STREAM FULL - stream_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + stream_id = self.pb.backup_node('node', node, options=['--stream']) node.stop() - self.set_archiving(backup_dir, 'node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - B1 = self.backup_node(backup_dir, 'node', node) + B1 = self.pb.backup_node('node', node) node.pgbench_init(scale=1) target_xid = node.safe_psql( @@ -2148,18 +2031,16 @@ def test_wal_depth_2(self): node.pgbench_init(scale=5) # B2 FULL on TLI1 - B2 = self.backup_node(backup_dir, 'node', node) + B2 = self.pb.backup_node('node', node) node.pgbench_init(scale=4) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.pgbench_init(scale=4) # TLI 2 - node_tli2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) + node_tli2 = self.pg_node.make_simple('node_tli2') node_tli2.cleanup() - output = self.restore_node( - backup_dir, 'node', node_tli2, + output = self.pb.restore_node('node', node_tli2, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=1', @@ -2169,7 +2050,7 @@ def test_wal_depth_2(self): 'INFO: Restore of backup {0} completed'.format(B1), output) - self.set_auto_conf(node_tli2, options={'port': node_tli2.port}) + node_tli2.set_auto_conf(options={'port': node_tli2.port}) node_tli2.slow_start() node_tli2.pgbench_init(scale=4) @@ -2178,23 +2059,19 @@ def test_wal_depth_2(self): "select txid_current()").decode('utf-8').rstrip() 
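The converted WAL tests stop calling os.remove() on a hard-coded backup_dir/wal/node path and instead go through remove_instance_wal, adding wait_instance_wal_exists before deleting the newest timeline's .history file, so the same test works whether the backup catalog is file-based or S3-backed. Those helpers come from the shared test framework, not from this diff; assuming a plain file-based catalog, a rough sketch of what they abstract over could be:

    # Hedged sketch only -- the real remove_instance_wal / wait_instance_wal_exists
    # are framework helpers that also cover the S3 catalog; this file-based
    # version just illustrates what the old os.remove(...) calls were replaced with.
    import os
    import time

    def remove_instance_wal(backup_dir, instance, wal_file_name):
        # the old code did this directly: os.remove(backup_dir/wal/<instance>/<file>)
        os.remove(os.path.join(str(backup_dir), 'wal', instance, wal_file_name))

    def wait_instance_wal_exists(backup_dir, instance, wal_file_name, timeout=60):
        # give the archiver time to deliver the file before the test touches it
        path = os.path.join(str(backup_dir), 'wal', instance, wal_file_name)
        deadline = time.monotonic() + timeout
        while not os.path.exists(path):
            if time.monotonic() > deadline:
                raise TimeoutError(f'{path} did not appear within {timeout}s')
            time.sleep(0.5)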
node_tli2.pgbench_init(scale=1) - B4 = self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + B4 = self.pb.backup_node('node', node_tli2, data_dir=node_tli2.data_dir) node_tli2.pgbench_init(scale=3) - self.backup_node( - backup_dir, 'node', node_tli2, data_dir=node_tli2.data_dir) + self.pb.backup_node('node', node_tli2, data_dir=node_tli2.data_dir) node_tli2.pgbench_init(scale=1) node_tli2.cleanup() # TLI3 - node_tli3 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) + node_tli3 = self.pg_node.make_simple('node_tli3') node_tli3.cleanup() # Note, that successful validation here is a happy coincidence - output = self.restore_node( - backup_dir, 'node', node_tli3, + output = self.pb.restore_node('node', node_tli3, options=[ '--recovery-target-xid={0}'.format(target_xid), '--recovery-target-timeline=2', @@ -2203,61 +2080,56 @@ def test_wal_depth_2(self): self.assertIn( 'INFO: Restore of backup {0} completed'.format(B1), output) - self.set_auto_conf(node_tli3, options={'port': node_tli3.port}) + node_tli3.set_auto_conf(options={'port': node_tli3.port}) node_tli3.slow_start() node_tli3.pgbench_init(scale=5) node_tli3.cleanup() # TLI4 - node_tli4 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) + node_tli4 = self.pg_node.make_simple('node_tli4') node_tli4.cleanup() - self.restore_node( - backup_dir, 'node', node_tli4, backup_id=stream_id, + self.pb.restore_node('node', node_tli4, backup_id=stream_id, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) - self.set_auto_conf(node_tli4, options={'port': node_tli4.port}) - self.set_archiving(backup_dir, 'node', node_tli4) + node_tli4.set_auto_conf(options={'port': node_tli4.port}) + self.pb.set_archiving('node', node_tli4) node_tli4.slow_start() node_tli4.pgbench_init(scale=5) - self.backup_node( - backup_dir, 'node', node_tli4, data_dir=node_tli4.data_dir) + self.pb.backup_node('node', node_tli4, data_dir=node_tli4.data_dir) node_tli4.pgbench_init(scale=5) node_tli4.cleanup() # TLI5 - node_tli5 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) + node_tli5 = self.pg_node.make_simple('node_tli5') node_tli5.cleanup() - self.restore_node( - backup_dir, 'node', node_tli5, backup_id=stream_id, + self.pb.restore_node('node', node_tli5, backup_id=stream_id, options=[ '--recovery-target=immediate', '--recovery-target-action=promote']) - self.set_auto_conf(node_tli5, options={'port': node_tli5.port}) - self.set_archiving(backup_dir, 'node', node_tli5) + node_tli5.set_auto_conf(options={'port': node_tli5.port}) + self.pb.set_archiving('node', node_tli5) node_tli5.slow_start() node_tli5.pgbench_init(scale=10) # delete '.history' file of TLI4 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000004.history')) + self.remove_instance_wal(backup_dir, 'node', '00000004.history') # delete '.history' file of TLI5 - os.remove(os.path.join(backup_dir, 'wal', 'node', '00000005.history')) + self.wait_instance_wal_exists(backup_dir, 'node', '00000005.history') + self.remove_instance_wal(backup_dir, 'node', '00000005.history') - output = self.delete_pb( - backup_dir, 'node', + output = self.pb.delete('node', options=[ '--delete-wal', '--dry-run', '--wal-depth=2', '--log-level-console=verbose']) - start_lsn_B2 = self.show_pb(backup_dir, 'node', B2)['start-lsn'] + start_lsn_B2 = self.pb.show('node', B2)['start-lsn'] self.assertIn( 'On timeline 1 WAL is protected from purge at 
{0}'.format(start_lsn_B2), output) @@ -2267,7 +2139,7 @@ def test_wal_depth_2(self): 'purge WAL interval between 000000010000000000000004 ' 'and 000000010000000000000005 on timeline 1'.format(B1), output) - start_lsn_B4 = self.show_pb(backup_dir, 'node', B4)['start-lsn'] + start_lsn_B4 = self.pb.show('node', B4)['start-lsn'] self.assertIn( 'On timeline 2 WAL is protected from purge at {0}'.format(start_lsn_B4), output) @@ -2282,11 +2154,11 @@ def test_wal_depth_2(self): 'from purge WAL interval between 000000010000000000000004 and ' '000000010000000000000006 on timeline 1', output) - show_tli1_before = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_before = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_before = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_before = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_before = self.show_archive(backup_dir, 'node', tli=5) + show_tli1_before = self.pb.show_archive('node', tli=1) + show_tli2_before = self.pb.show_archive('node', tli=2) + show_tli3_before = self.pb.show_archive('node', tli=3) + show_tli4_before = self.pb.show_archive('node', tli=4) + show_tli5_before = self.pb.show_archive('node', tli=5) self.assertTrue(show_tli1_before) self.assertTrue(show_tli2_before) @@ -2296,17 +2168,16 @@ def test_wal_depth_2(self): sleep(5) - output = self.delete_pb( - backup_dir, 'node', + output = self.pb.delete('node', options=['--delete-wal', '--wal-depth=2', '--log-level-console=verbose']) # print(output) - show_tli1_after = self.show_archive(backup_dir, 'node', tli=1) - show_tli2_after = self.show_archive(backup_dir, 'node', tli=2) - show_tli3_after = self.show_archive(backup_dir, 'node', tli=3) - show_tli4_after = self.show_archive(backup_dir, 'node', tli=4) - show_tli5_after = self.show_archive(backup_dir, 'node', tli=5) + show_tli1_after = self.pb.show_archive('node', tli=1) + show_tli2_after = self.pb.show_archive('node', tli=2) + show_tli3_after = self.pb.show_archive('node', tli=3) + show_tli4_after = self.pb.show_archive('node', tli=4) + show_tli5_after = self.pb.show_archive('node', tli=5) self.assertNotEqual(show_tli1_before, show_tli1_after) self.assertNotEqual(show_tli2_before, show_tli2_after) @@ -2349,7 +2220,7 @@ def test_wal_depth_2(self): show_tli2_after['lost-segments'][0]['end-segno'], '00000002000000000000000A') - self.validate_pb(backup_dir, 'node') + self.pb.validate('node') def test_basic_wal_depth(self): """ @@ -2360,46 +2231,40 @@ def test_basic_wal_depth(self): wal-depth=1 """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_config('node', options=['--archive-timeout=60s']) + self.pb.set_archiving('node', node) node.slow_start() # FULL node.pgbench_init(scale=1) - B1 = self.backup_node(backup_dir, 'node', node) + B1 = self.pb.backup_node('node', node) # B2 pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - B2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + B2 = self.pb.backup_node('node', node, 
backup_type='page') # B3 pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - B3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + B3 = self.pb.backup_node('node', node, backup_type='page') # B4 pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - B4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + B4 = self.pb.backup_node('node', node, backup_type='page') # B5 pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - B5 = self.backup_node( - backup_dir, 'node', node, backup_type='page', + B5 = self.pb.backup_node('node', node, backup_type='page', options=['--wal-depth=1', '--delete-wal']) pgbench = node.pgbench(options=['-T', '10', '-c', '2']) @@ -2414,17 +2279,16 @@ def test_basic_wal_depth(self): pgbench = node.pgbench(options=['-T', '10', '-c', '2']) pgbench.wait() - tli1 = self.show_archive(backup_dir, 'node', tli=1) + tli1 = self.pb.show_archive('node', tli=1, + options=['--log-level-file=VERBOSE']) # check that there are 4 lost_segments intervals self.assertEqual(len(tli1['lost-segments']), 4) - output = self.validate_pb( - backup_dir, 'node', B5, + output = self.pb.validate( + 'node', B5, options=['--recovery-target-xid={0}'.format(target_xid)]) - print(output) - self.assertIn( 'INFO: Backup validation completed successfully on time', output) @@ -2434,96 +2298,85 @@ def test_basic_wal_depth(self): output) for backup_id in [B1, B2, B3, B4]: - try: - self.validate_pb( - backup_dir, 'node', backup_id, - options=['--recovery-target-xid={0}'.format(target_xid)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "without valid full backup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Not enough WAL records to xid {0}".format(target_xid), - e.message) - - self.validate_pb(backup_dir, 'node') + self.pb.validate('node', backup_id, + options=['--recovery-target-xid', target_xid], + expect_error="because page backup should not be " + "possible without valid full backup") + self.assertMessage(contains=f"ERROR: Not enough WAL records to xid {target_xid}") + + self.pb.validate('node') + @needs_gdb def test_concurrent_running_full_backup(self): """ https://github.com/postgrespro/pg_probackup/issues/328 """ - self._check_gdb_flag_or_skip_test() - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_data_file') gdb.run_until_break() gdb.kill() self.assertTrue( - self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], 'RUNNING') - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.pb.backup_node('node', node, backup_type='delta', 
options=['--retention-redundancy=2', '--delete-expired']) self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'RUNNING') - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_data_file') gdb.run_until_break() gdb.kill() - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_data_file') gdb.run_until_break() gdb.kill() - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) - gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + gdb = self.pb.backup_node('node', node, gdb=True) gdb.set_breakpoint('backup_data_file') gdb.run_until_break() gdb.kill() - self.backup_node( - backup_dir, 'node', node, backup_type='delta', + self.expire_locks(backup_dir, 'node') + + self.pb.backup_node('node', node, backup_type='delta', options=['--retention-redundancy=2', '--delete-expired'], return_id=False) self.assertTrue( - self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], 'OK') self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'RUNNING') self.assertTrue( - self.show_pb(backup_dir, 'node')[2]['status'], + self.pb.show('node')[2]['status'], 'OK') self.assertEqual( - len(self.show_pb(backup_dir, 'node')), + len(self.pb.show('node')), 6) diff --git a/tests/set_backup_test.py b/tests/set_backup_test.py index 31334cfba..0d149ce63 100644 --- a/tests/set_backup_test.py +++ b/tests/set_backup_test.py @@ -1,118 +1,75 @@ import unittest import subprocess import os -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + +from .helpers.data_helpers import tail_file +from .helpers.ptrack_helpers import ProbackupTest from sys import exit from datetime import datetime, timedelta +from .helpers.enums.date_time_enum import DateTimePattern - -class SetBackupTest(ProbackupTest, unittest.TestCase): +class SetBackupTest(ProbackupTest): # @unittest.expectedFailure # @unittest.skip("skip") def test_set_backup_sanity(self): """general sanity for set-backup command""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node('node', node, options=['--stream']) - recovery_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['recovery-time'] + recovery_time = self.pb.show('node', backup_id=backup_id)['recovery-time'] + # Remove microseconds + recovery_time = datetime.strptime(recovery_time + '00', DateTimePattern.Y_m_d_H_M_S_f_z_dash.value) + recovery_time = recovery_time.strftime(DateTimePattern.Y_m_d_H_M_S_z_dash.value) expire_time_1 = "{:%Y-%m-%d %H:%M:%S}".format( datetime.now() + timedelta(days=5)) - try: - self.set_backup(backup_dir, False, options=['--ttl=30d']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because 
of missing instance. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Required parameter not specified: --instance', - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.set_backup( - backup_dir, 'node', - options=[ - "--ttl=30d", - "--expire-time='{0}'".format(expire_time_1)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because options cannot be mixed. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--expire-time' " - "and '--ttl' options together", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - try: - self.set_backup(backup_dir, 'node', options=["--ttl=30d"]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing backup_id. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You must specify parameter (-i, --backup-id) " - "for 'set-backup' command", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.set_backup( - backup_dir, 'node', backup_id, options=["--ttl=30d"]) - - actual_expire_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['expire-time'] + self.pb.set_backup(False, options=['--ttl=30d'], + expect_error="because of missing instance") + self.assertMessage(contains='ERROR: Required parameter not specified: --instance') + + self.pb.set_backup('node', + options=["--ttl=30d", f"--expire-time='{expire_time_1}'"], + expect_error="because options cannot be mixed") + self.assertMessage(contains="ERROR: You cannot specify '--expire-time' " + "and '--ttl' options together") + + self.pb.set_backup('node', options=["--ttl=30d"], + expect_error="because of missing backup_id") + self.assertMessage(contains="ERROR: You must specify parameter (-i, " + "--backup-id) for 'set-backup' command") + + self.pb.set_backup('node', backup_id, options=["--ttl=30d"]) + + actual_expire_time = self.pb.show('node', backup_id=backup_id)['expire-time'] self.assertNotEqual(expire_time_1, actual_expire_time) expire_time_2 = "{:%Y-%m-%d %H:%M:%S}".format( datetime.now() + timedelta(days=6)) - self.set_backup( - backup_dir, 'node', backup_id, + self.pb.set_backup('node', backup_id, options=["--expire-time={0}".format(expire_time_2)]) - actual_expire_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['expire-time'] + actual_expire_time = self.pb.show('node', backup_id=backup_id)['expire-time'] self.assertIn(expire_time_2, actual_expire_time) # unpin backup - self.set_backup( - backup_dir, 'node', backup_id, options=["--ttl=0"]) + self.pb.set_backup('node', backup_id, options=["--ttl=0"]) - attr_list = self.show_pb( - backup_dir, 'node', backup_id=backup_id) + attr_list = self.pb.show('node', backup_id=backup_id) self.assertNotIn('expire-time', attr_list) - self.set_backup( - backup_dir, 'node', backup_id, options=["--expire-time={0}".format(recovery_time)]) + self.pb.set_backup('node', backup_id, options=["--expire-time={0}".format(recovery_time)]) # parse string to datetime object #new_expire_time = datetime.strptime(new_expire_time, '%Y-%m-%d %H:%M:%S%z') @@ -121,42 +78,31 @@ def 
test_set_backup_sanity(self): # @unittest.expectedFailure def test_retention_redundancy_pinning(self): """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + node = self.pg_node.make_simple('node') - with open(os.path.join( - backup_dir, 'backups', 'node', - "pg_probackup.conf"), "a") as conf: - conf.write("retention-redundancy = 1\n") + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() - self.set_config( - backup_dir, 'node', options=['--retention-redundancy=1']) + self.pb.set_config('node', options=['--retention-redundancy=1']) # Make backups to be purged - full_id = self.backup_node(backup_dir, 'node', node) - page_id = self.backup_node( - backup_dir, 'node', node, backup_type="page") + full_id = self.pb.backup_node('node', node) + page_id = self.pb.backup_node('node', node, backup_type="page") # Make backups to be keeped - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type="page") + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type="page") - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) - self.set_backup( - backup_dir, 'node', page_id, options=['--ttl=5d']) + self.pb.set_backup('node', page_id, options=['--ttl=5d']) # Purge backups - log = self.delete_expired( - backup_dir, 'node', + log = self.pb.delete_expired( + 'node', options=['--delete-expired', '--log-level-console=LOG']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) + self.assertEqual(len(self.pb.show('node')), 4) self.assertIn('Time Window: 0d/5d', log) self.assertIn( @@ -170,53 +116,42 @@ def test_retention_redundancy_pinning(self): # @unittest.skip("skip") def test_retention_window_pinning(self): """purge all backups using window-based retention policy""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL BACKUP - backup_id_1 = self.backup_node(backup_dir, 'node', node) - page1 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_1 = self.pb.backup_node('node', node) + page1 = self.pb.backup_node('node', node, backup_type='page') # Take second FULL BACKUP - backup_id_2 = self.backup_node(backup_dir, 'node', node) - page2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node('node', node) + page2 = self.pb.backup_node('node', node, backup_type='page') # Take third FULL BACKUP - backup_id_3 = self.backup_node(backup_dir, 'node', node) - page2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') - - backups = os.path.join(backup_dir, 'backups', 'node') - for backup in os.listdir(backups): - if backup == 'pg_probackup.conf': - continue - with 
open( - os.path.join( - backups, backup, "backup.control"), "a") as conf: - conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( - datetime.now() - timedelta(days=3))) - - self.set_backup( - backup_dir, 'node', page1, options=['--ttl=30d']) + backup_id_3 = self.pb.backup_node('node', node) + page2 = self.pb.backup_node('node', node, backup_type='page') + + for backup in backup_dir.list_instance_backups('node'): + with self.modify_backup_control(backup_dir, 'node', backup) as cf: + cf.data += "\nrecovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format( + datetime.now() - timedelta(days=3)) + + self.pb.set_backup('node', page1, options=['--ttl=30d']) # Purge backups - out = self.delete_expired( - backup_dir, 'node', + out = self.pb.delete_expired( + 'node', options=[ '--log-level-console=LOG', '--retention-window=1', '--delete-expired']) - self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) + self.assertEqual(len(self.pb.show('node')), 2) self.assertIn( 'LOG: Backup {0} is pinned until'.format(page1), out) @@ -237,26 +172,21 @@ def test_wal_retention_and_pinning(self): B1 B2---P---B3---> """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # take FULL BACKUP - self.backup_node( - backup_dir, 'node', node, options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) node.pgbench_init(scale=1) # Take PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, backup_type='page', options=['--stream']) node.pgbench_init(scale=1) @@ -264,8 +194,7 @@ def test_wal_retention_and_pinning(self): # Take DELTA BACKUP and pin it expire_time = "{:%Y-%m-%d %H:%M:%S}".format( datetime.now() + timedelta(days=6)) - backup_id_pinned = self.backup_node( - backup_dir, 'node', node, + backup_id_pinned = self.pb.backup_node('node', node, backup_type='delta', options=[ '--stream', @@ -274,14 +203,16 @@ def test_wal_retention_and_pinning(self): node.pgbench_init(scale=1) # Take second PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--stream']) + self.pb.backup_node('node', node, backup_type='delta', options=['--stream']) node.pgbench_init(scale=1) + tailer = tail_file(os.path.join(node.logs_dir, 'postgresql.log')) + tailer.wait(contains='LOG: pushing file "000000010000000000000004"') + # Purge backups - out = self.delete_expired( - backup_dir, 'node', + out = self.pb.delete_expired( + 'node', options=[ '--log-level-console=LOG', '--delete-wal', '--wal-depth=2']) @@ -292,15 +223,13 @@ def test_wal_retention_and_pinning(self): 'purpose of WAL retention'.format(backup_id_pinned), out) - for instance in self.show_archive(backup_dir): + for instance in self.pb.show_archive(): timelines = instance['timelines'] - - # sanity - for timeline in timelines: - self.assertEqual( - timeline['min-segno'], - '000000010000000000000004') - self.assertEqual(timeline['status'], 'OK') + for timeline in timelines: + self.assertEqual( + timeline['min-segno'], + '000000010000000000000004') + self.assertEqual(timeline['status'], 'OK') # @unittest.skip("skip") 
def test_wal_retention_and_pinning_1(self): @@ -313,35 +242,37 @@ def test_wal_retention_and_pinning_1(self): P---B1---> """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() expire_time = "{:%Y-%m-%d %H:%M:%S}".format( datetime.now() + timedelta(days=6)) # take FULL BACKUP - backup_id_pinned = self.backup_node( - backup_dir, 'node', node, + backup_id_pinned = self.pb.backup_node('node', node, options=['--expire-time={0}'.format(expire_time)]) node.pgbench_init(scale=2) # Take second PAGE BACKUP - self.backup_node( - backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') node.pgbench_init(scale=2) + self.wait_instance_wal_exists(backup_dir, 'node', + "000000010000000000000001.gz") + + tailer = tail_file(os.path.join(node.logs_dir, 'postgresql.log')) + tailer.wait(contains='LOG: pushing file "000000010000000000000002"') + # Purge backups - out = self.delete_expired( - backup_dir, 'node', + out = self.pb.delete_expired( + 'node', options=[ '--log-level-console=verbose', '--delete-wal', '--wal-depth=2']) @@ -352,60 +283,51 @@ def test_wal_retention_and_pinning_1(self): 'purpose of WAL retention'.format(backup_id_pinned), out) - for instance in self.show_archive(backup_dir): + for instance in self.pb.show_archive(): timelines = instance['timelines'] + for timeline in timelines: + self.assertEqual( + timeline['min-segno'], + '000000010000000000000002') + self.assertEqual(timeline['status'], 'OK') - # sanity - for timeline in timelines: - self.assertEqual( - timeline['min-segno'], - '000000010000000000000002') - self.assertEqual(timeline['status'], 'OK') - - self.validate_pb(backup_dir) + self.pb.validate() # @unittest.skip("skip") def test_add_note_newlines(self): """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=['--stream', '--note={0}'.format('hello\nhello')]) - backup_meta = self.show_pb(backup_dir, 'node', backup_id) + backup_meta = self.pb.show('node', backup_id) self.assertEqual(backup_meta['note'], "hello") - self.set_backup(backup_dir, 'node', backup_id, options=['--note=hello\nhello']) + self.pb.set_backup('node', backup_id, options=['--note=hello\nhello']) - backup_meta = self.show_pb(backup_dir, 'node', backup_id) + backup_meta = self.pb.show('node', backup_id) self.assertEqual(backup_meta['note'], "hello") - self.set_backup(backup_dir, 'node', backup_id, options=['--note=none']) + self.pb.set_backup('node', backup_id, options=['--note=none']) - backup_meta = self.show_pb(backup_dir, 'node', backup_id) + backup_meta = self.pb.show('node', 
backup_id) self.assertNotIn('note', backup_meta) # @unittest.skip("skip") def test_add_big_note(self): """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # note = node.safe_psql( @@ -417,46 +339,30 @@ def test_add_big_note(self): "SELECT repeat('hello', 210)").rstrip() # FULL - try: - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--note={0}'.format(note)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because note is too large " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup note cannot exceed 1024 bytes", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.backup_node('node', node, + options=['--stream', '--note', note], + expect_error="because note is too large") + self.assertMessage(contains="ERROR: Backup note cannot exceed 1024 bytes") note = node.safe_psql( "postgres", "SELECT repeat('hello', 200)").decode('utf-8').rstrip() - backup_id = self.backup_node( - backup_dir, 'node', node, + backup_id = self.pb.backup_node('node', node, options=['--stream', '--note={0}'.format(note)]) - backup_meta = self.show_pb(backup_dir, 'node', backup_id) + backup_meta = self.pb.show('node', backup_id) self.assertEqual(backup_meta['note'], note) # @unittest.skip("skip") def test_add_big_note_1(self): """""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() note = node.safe_psql( @@ -464,13 +370,56 @@ def test_add_big_note_1(self): "SELECT repeat('q', 1024)").decode('utf-8').rstrip() # FULL - backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node('node', node, options=['--stream']) - self.set_backup( - backup_dir, 'node', backup_id, + self.pb.set_backup('node', backup_id, options=['--note={0}'.format(note)]) - backup_meta = self.show_pb(backup_dir, 'node', backup_id) + backup_meta = self.pb.show('node', backup_id) print(backup_meta) self.assertEqual(backup_meta['note'], note) + +#################################################################### +# dry-run +#################################################################### + + def test_basic_dry_run_set_backup(self): + """""" + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + node.slow_start() + + note = node.safe_psql( + "postgres", + "SELECT repeat('q', 1024)").decode('utf-8').rstrip() + + backup_id = self.pb.backup_node('node', node, options=['--stream']) + + expire_time = "{:%Y-%m-%d %H:%M:%S}".format( + datetime.now() + timedelta(days=6)) + + self.pb.set_backup('node', backup_id, 
+ options=['--expire-time={}'.format(expire_time), + '--dry-run', + '--note={0}'.format(note)]) + + backup_meta = self.pb.show('node', backup_id) + + print(backup_meta) + self.assertFalse(any('expire-time' in d for d in backup_meta)) + self.assertFalse(any('note' in d for d in backup_meta)) + + self.pb.set_backup('node', backup_id, + options=['--ttl=30d', + '--dry-run', + '--note={0}'.format(note)]) + + backup_meta = self.pb.show('node', backup_id) + + print(backup_meta) + self.assertFalse(any('ttl' in d for d in backup_meta)) + self.assertFalse(any('note' in d for d in backup_meta)) diff --git a/tests/show_test.py b/tests/show_test.py index c4b96499d..ae4fd2822 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1,117 +1,106 @@ -import os +import copy import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import os + +from .compression_test import have_alg +from .helpers.ptrack_helpers import ProbackupTest, fs_backup_class +from .helpers.state_helper import get_program_version +from .helpers.validators.show_validator import ShowJsonResultValidator -class ShowTest(ProbackupTest, unittest.TestCase): +class ShowTest(ProbackupTest): # @unittest.skip("skip") # @unittest.expectedFailure def test_show_1(self): """Status DONE and OK""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.assertEqual( - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=["--log-level-console=off"]), None ) - self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn("OK", self.pb.show('node', as_text=True)) # @unittest.skip("skip") # @unittest.expectedFailure def test_show_json(self): """Status DONE and OK""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() self.assertEqual( - self.backup_node( - backup_dir, 'node', node, + self.pb.backup_node('node', node, options=["--log-level-console=off"]), None ) - self.backup_node(backup_dir, 'node', node) - self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) + self.pb.backup_node('node', node) + self.assertIn("OK", self.pb.show('node', as_text=True)) # @unittest.skip("skip") def test_corrupt_2(self): """Status CORRUPT""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + 
self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # delete file which belong to backup - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "database", "postgresql.conf") - os.remove(file) - - try: - self.validate_pb(backup_dir, 'node', backup_id) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because backup corrupted." - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd - ) - ) - except ProbackupException as e: - self.assertIn( - 'data files are corrupted', - e.message, - '\n Unexpected Error Message: {0}\n' - ' CMD: {1}'.format(repr(e.message), self.cmd) - ) - self.assertIn("CORRUPT", self.show_pb(backup_dir, as_text=True)) + self.remove_backup_file(backup_dir, 'node', backup_id, "database/postgresql.conf") + + error_result = self.pb.validate('node', backup_id, expect_error=True) + + self.assertMessage(error_result, contains='data files are corrupted') + self.assertIn("CORRUPT", self.pb.show(as_text=True)) + + def test_failed_backup_status(self): + """Status ERROR - showing recovery-time for failed backup""" + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + self.pb.backup_node('node', node, backup_type='delta', expect_error=True) + + show_res = self.pb.show('node', as_text=True, as_json=False) + self.assertIn("ERROR", show_res) + self.assertIn("Recovery Time", show_res) + self.assertNotIn("---- DELTA", show_res) # @unittest.skip("skip") def test_no_control_file(self): """backup.control doesn't exist""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # delete backup.control file - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "backup.control") - os.remove(file) + self.remove_backup_file(backup_dir, "node", backup_id, "backup.control") - output = self.show_pb(backup_dir, 'node', as_text=True, as_json=False) + output = self.pb.show('node', as_text=True, as_json=False) self.assertIn( 'Control file', @@ -124,26 +113,20 @@ def test_no_control_file(self): # @unittest.skip("skip") def test_empty_control_file(self): """backup.control is empty""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # truncate backup.control file - file = os.path.join( -
backup_dir, "backups", "node", - backup_id, "backup.control") - fd = open(file, 'w') - fd.close() + with self.modify_backup_control(self.backup_dir, 'node', backup_id) as cf: + cf.data = '' - output = self.show_pb(backup_dir, 'node', as_text=True, as_json=False) + output = self.pb.show('node', as_text=True, as_json=False) self.assertIn( 'Control file', @@ -157,29 +140,22 @@ def test_empty_control_file(self): # @unittest.expectedFailure def test_corrupt_control_file(self): """backup.control contains invalid option""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # corrupt backup.control file - file = os.path.join( - backup_dir, "backups", "node", - backup_id, "backup.control") - fd = open(file, 'a') - fd.write("statuss = OK") - fd.close() + with self.modify_backup_control(self.backup_dir, 'node', backup_id) as cf: + cf.data += "\nstatuss = OK" self.assertIn( 'WARNING: Invalid option "statuss" in file', - self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) + self.pb.show('node', as_json=False, as_text=True)) # @unittest.skip("skip") # @unittest.expectedFailure @@ -188,88 +164,65 @@ def test_corrupt_correctness(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=1) # FULL - backup_local_id = self.backup_node( - backup_dir, 'node', node, no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) + backup_local_id = self.pb.backup_node('node', node, no_remote=True) - backup_remote_id = self.backup_node(backup_dir, 'node', node) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + backup_remote_id = self.pb.backup_node('node', node) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.check_backup_size_in_show(backup_local_id, backup_remote_id, 'node', compressed=True) + self.check_backup_size_in_show(backup_local_id, backup_remote_id, 'node', compressed=False) # DELTA - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, backup_type='delta', no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) + output_local = self.pb.show('node', as_json=False, 
backup_id=backup_local_id) + self.pb.delete('node', backup_local_id) - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_remote_id = self.pb.backup_node('node', node, backup_type='delta') - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) + output_remote = self.pb.show('node', as_json=False, backup_id=backup_remote_id) + self.pb.delete('node', backup_remote_id) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) + self.assertAlmostEqual( + int(output_local['data-bytes']), + int(output_remote['data-bytes']), delta=2) - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.assertAlmostEqual( + int(output_local['uncompressed-bytes']), + int(output_remote['uncompressed-bytes']), delta=2) # PAGE - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, backup_type='page', no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) + output_local = self.pb.show('node', as_json=False, backup_id=backup_local_id) + self.pb.delete('node', backup_local_id) - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_remote_id = self.pb.backup_node('node', node, backup_type='page') - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) + output_remote = self.pb.show('node', as_json=False, backup_id=backup_remote_id) + self.pb.delete('node', backup_remote_id) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) + self.assertAlmostEqual( + int(output_local['data-bytes']), + int(output_remote['data-bytes']), delta=2) - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.assertAlmostEqual( + int(output_local['uncompressed-bytes']), + int(output_remote['uncompressed-bytes']), delta=2) # @unittest.skip("skip") # @unittest.expectedFailure @@ -278,92 +231,78 @@ def test_corrupt_correctness_1(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() + # stabilize test + # there were situations when, due to internal wal segment switches, + # backup_label differed in size: + # - first page backup had 0/E000028 location, and + # - second page backup - 0/10000028 + # Stabilize by adding more segments so that it is always long + for i in range(8): + self.switch_wal_segment(node) + node.pgbench_init(scale=1) # FULL - backup_local_id = self.backup_node( - backup_dir, 'node', node, no_remote=True) - - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) + backup_local_id = self.pb.backup_node('node', node, no_remote=True) -
backup_remote_id = self.backup_node(backup_dir, 'node', node) - - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) + backup_remote_id = self.pb.backup_node('node', node) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.check_backup_size_in_show(backup_local_id, backup_remote_id, 'node', compressed=True) + self.check_backup_size_in_show(backup_local_id, backup_remote_id, 'node', compressed=False) # change data pgbench = node.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() # DELTA - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, backup_type='delta', no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) + output_local = self.pb.show('node', as_json=False, backup_id=backup_local_id) + self.pb.delete('node', backup_local_id) - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta') + backup_remote_id = self.pb.backup_node('node', node, backup_type='delta') - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) + output_remote = self.pb.show('node', as_json=False, backup_id=backup_remote_id) + self.pb.delete('node', backup_remote_id) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) + self.assertAlmostEqual( + int(output_local['data-bytes']), + int(output_remote['data-bytes']), delta=2) - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.assertAlmostEqual( + int(output_local['uncompressed-bytes']), + int(output_remote['uncompressed-bytes']), delta=2) # PAGE - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, backup_type='page', no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) + output_local = self.pb.show('node', as_json=False, backup_id=backup_local_id) + self.pb.delete('node', backup_local_id) - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_remote_id = self.pb.backup_node('node', node, backup_type='page') - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) + output_remote = self.pb.show('node', as_json=False, backup_id=backup_remote_id) + self.pb.delete('node', backup_remote_id) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) + self.assertAlmostEqual( + int(output_local['data-bytes']), + int(output_remote['data-bytes']), delta=2) - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.assertAlmostEqual( + int(output_local['uncompressed-bytes']), + int(output_remote['uncompressed-bytes']), delta=2) # @unittest.skip("skip") # @unittest.expectedFailure @@ -372,138 +311,305 @@ def test_corrupt_correctness_2(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - backup_dir = os.path.join(self.tmp_path, 
self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() + # stabilize test + # there were situations when, due to internal wal segment switches, + # backup_label differed in size: + # - first page backup had 0/E000028 location, and + # - second page backup - 0/10000028 + # Stabilize by adding more segments so that it is always long + for i in range(8): + self.switch_wal_segment(node) + node.pgbench_init(scale=1) # FULL - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, options=['--compress'], no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, options=['--compress']) + backup_remote_id = self.pb.backup_node('node', node, options=['--compress']) else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, + backup_remote_id = self.pb.backup_node('node', node, options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) - - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.check_backup_size_in_show(backup_local_id, backup_remote_id, 'node', compressed=True) + self.check_backup_size_in_show(backup_local_id, backup_remote_id, 'node', compressed=False) # change data pgbench = node.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() # DELTA - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, backup_type='delta', options=['--compress'], no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) + output_local = self.pb.show('node', as_json=False, backup_id=backup_local_id) + self.pb.delete('node', backup_local_id) if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', options=['--compress']) + backup_remote_id = self.pb.backup_node('node', node, backup_type='delta', options=['--compress']) else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='delta', + backup_remote_id = self.pb.backup_node('node', node, backup_type='delta', options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) + output_remote = self.pb.show('node', as_json=False, backup_id=backup_remote_id) + self.pb.delete('node', backup_remote_id) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) + self.assertAlmostEqual( + int(output_local['data-bytes']), + int(output_remote['data-bytes']), delta=2) - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) +
self.assertAlmostEqual( + int(output_local['uncompressed-bytes']), + int(output_remote['uncompressed-bytes']), delta=2) # PAGE - backup_local_id = self.backup_node( - backup_dir, 'node', node, + backup_local_id = self.pb.backup_node('node', node, backup_type='page', options=['--compress'], no_remote=True) - output_local = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_local_id) - self.delete_pb(backup_dir, 'node', backup_local_id) + output_local = self.pb.show('node', as_json=False, backup_id=backup_local_id) + self.pb.delete('node', backup_local_id) if self.remote: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', options=['--compress']) + backup_remote_id = self.pb.backup_node('node', node, backup_type='page', options=['--compress']) else: - backup_remote_id = self.backup_node( - backup_dir, 'node', node, backup_type='page', + backup_remote_id = self.pb.backup_node('node', node, backup_type='page', options=['--remote-proto=ssh', '--remote-host=localhost', '--compress']) - output_remote = self.show_pb( - backup_dir, 'node', as_json=False, backup_id=backup_remote_id) - self.delete_pb(backup_dir, 'node', backup_remote_id) + output_remote = self.pb.show('node', as_json=False, backup_id=backup_remote_id) + self.pb.delete('node', backup_remote_id) # check correctness - self.assertEqual( - output_local['data-bytes'], - output_remote['data-bytes']) + self.assertAlmostEqual( + int(output_local['data-bytes']), + int(output_remote['data-bytes']), delta=2) - self.assertEqual( - output_local['uncompressed-bytes'], - output_remote['uncompressed-bytes']) + self.assertAlmostEqual( + int(output_local['uncompressed-bytes']), + int(output_remote['uncompressed-bytes']), delta=2) # @unittest.skip("skip") # @unittest.expectedFailure def test_color_with_no_terminal(self): """backup.control contains invalid option""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums'], + node = self.pg_node.make_simple('node', pg_options={'autovacuum': 'off'}) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() node.pgbench_init(scale=1) # FULL - try: - self.backup_node( - backup_dir, 'node', node, options=['--archive-timeout=1s']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because archiving is disabled\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertNotIn( - '[0m', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = self.pb.backup_node('node', node, options=['--archive-timeout=1s'], expect_error=True) + self.assertNotIn('[0m', error_result) + + @unittest.skipIf(not (have_alg('lz4') and have_alg('zstd')), + "pg_probackup is not compiled with lz4 or zstd support") + def test_show_command_as_text(self): + instance_name = 'node' + node = self.pg_node.make_simple( + base_dir=instance_name) + + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) + node.slow_start() + + self.pb.backup_node(instance_name, node, backup_type="full", options=['--compress-level', '1', + '--compress-algorithm', 'pglz']) + + self.pb.backup_node(instance_name, node, backup_type="delta", 
options=['--compress-level', '3', + '--compress-algorithm', 'lz4']) + + self.pb.backup_node(instance_name, node, backup_type="page", options=['--compress-level', '9', + '--compress-algorithm', 'zstd']) + self.pb.backup_node(instance_name, node, backup_type="page") + + show_backups = self.pb.show(instance_name, as_text=True, as_json=False) + self.assertIn(" FULL ARCHIVE ", show_backups) # Mode, Wal mode + self.assertIn(" DELTA ARCHIVE ", show_backups) # Mode, Wal mode + self.assertIn(" PAGE ARCHIVE ", show_backups) # Mode, Wal mode + self.assertIn(" pglz ", show_backups) + self.assertIn(" lz4 ", show_backups) + self.assertIn(" zstd ", show_backups) + self.assertIn(" none ", show_backups) + self.assertIn(" OK ", show_backups) # Status + + def test_show_command_as_json(self): + instance_name = 'node' + node = self.pg_node.make_simple( + base_dir=instance_name) + + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) + node.slow_start() + + pg_version = int(self.pg_config_version/10000) + + full_backup_id = self.pb.backup_node(instance_name, node, backup_type="full") + + delta_backup_id = self.pb.backup_node(instance_name, node, backup_type="delta") + + page_backup_id = self.pb.backup_node(instance_name, node, backup_type="page") + + show_backups = self.pb.show(instance_name, as_text=False, as_json=True) + + common_show_result = ShowJsonResultValidator() + common_show_result.wal = "ARCHIVE" + common_show_result.compress_alg = "none" + common_show_result.compress_level = 1 + common_show_result.from_replica = "false" + common_show_result.block_size = 8192 + common_show_result.xlog_block_size = 8192 + common_show_result.checksum_version = 1 + common_show_result.program_version = get_program_version() + common_show_result.server_version = pg_version + common_show_result.status = "OK" + + full_show_result = copy.deepcopy(common_show_result) + full_show_result.backup_mode = "FULL" + full_show_result.set_backup_id = full_backup_id + + delta_show_result = copy.deepcopy(common_show_result) + delta_show_result.backup_mode = "DELTA" + delta_show_result.backup_id = delta_backup_id + delta_show_result.parent_backup_id = full_backup_id + + page_show_result = copy.deepcopy(common_show_result) + page_show_result.backup_mode = "PAGE" + page_show_result.backup_id = page_backup_id + page_show_result.parent_backup_id = delta_backup_id + + full_show_result.check_show_json(show_backups[0]) + delta_show_result.check_show_json(show_backups[1]) + page_show_result.check_show_json(show_backups[2]) + + def test_tablespace_print_issue_431(self): + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + # Create tablespace + tblspc_path = os.path.join(node.base_dir, "tblspc") + os.makedirs(tblspc_path) + with node.connect("postgres") as con: + con.connection.autocommit = True + con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) + con.connection.autocommit = False + con.execute("CREATE TABLE test (id int) TABLESPACE tblspc") + con.execute("INSERT INTO test VALUES (1)") + con.commit() + + full_backup_id = self.pb.backup_node('node', node) + self.assertIn("OK", self.pb.show('node', as_text=True)) + # Check that tablespace info exists. 
JSON + self.assertIn("tablespace_map", self.pb.show('node', as_text=True)) + self.assertIn("oid", self.pb.show('node', as_text=True)) + self.assertIn("path", self.pb.show('node', as_text=True)) + self.assertIn(tblspc_path, self.pb.show('node', as_text=True)) + # Check that tablespace info exists. PLAIN + self.assertIn("tablespace_map", self.pb.show('node', backup_id=full_backup_id, as_text=True, as_json=False)) + self.assertIn(tblspc_path, self.pb.show('node', backup_id=full_backup_id, as_text=True, as_json=False)) + # Check that tablespace info NOT exists if backup id not provided. PLAIN + self.assertNotIn("tablespace_map", self.pb.show('node', as_text=True, as_json=False)) + + def test_show_hidden_merged_dirs_as_json(self): + instance_name = 'node' + node = self.pg_node.make_simple( + base_dir=instance_name) + + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) + node.slow_start() + + pg_version = int(self.pg_config_version/10000) + + full_backup_id = self.pb.backup_node(instance_name, node, backup_type="full") + + delta_backup_id = self.pb.backup_node(instance_name, node, backup_type="delta") + + page_backup_id = self.pb.backup_node(instance_name, node, backup_type="page") + + self.pb.merge_backup(instance_name, delta_backup_id) + show_backups = self.pb.show(instance_name, as_text=False, as_json=True, options=["--show-symlinks"]) + + self.assertEqual(show_backups[0]['backup-mode'], "FULL") + self.assertEqual(show_backups[0]['id'], delta_backup_id) + self.assertEqual(show_backups[0]['id'], show_backups[1]['id']) + self.assertEqual(show_backups[0]['dir'], full_backup_id) + + self.assertEqual(show_backups[1]['status'], "SYMLINK") + self.assertEqual(show_backups[1]['id'], show_backups[0]['id']) + self.assertEqual(show_backups[1]['symlink'], full_backup_id) + + + def test_show_hidden_merged_dirs_as_plain(self): + instance_name = 'node' + node = self.pg_node.make_simple( + base_dir=instance_name) + + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) + node.slow_start() + + pg_version = int(self.pg_config_version/10000) + + full_backup_id = self.pb.backup_node(instance_name, node, backup_type="full") + + delta_backup_id = self.pb.backup_node(instance_name, node, backup_type="delta") + + page_backup_id = self.pb.backup_node(instance_name, node, backup_type="page") + + self.pb.merge_backup(instance_name, delta_backup_id) + show_backups = self.pb.show(instance_name, as_text=True, as_json=False, options=["--show-symlinks"]) + + self.assertIn(" PAGE ARCHIVE ", show_backups) # Mode, Wal mode + self.assertIn(" FULL ARCHIVE ", show_backups) # Mode, Wal mode + + def get_backup_label_size(self, backup_id, instance_name): + """Get backup_label size from file backup_content.control""" + content_control_json = self.read_backup_content_control(backup_id, instance_name) + for item in content_control_json: + if item.get('path') == 'backup_label': + return item['size'] + + def check_backup_size_in_show(self, first_backup_id, second_backup_id, instance_name, compressed=True): + """Use show command to check backup size. 
If there is a difference, + try to compare the size without the backup_label file""" + first_out = self.pb.show('node', as_json=False, backup_id=first_backup_id) + + second_out = self.pb.show('node', as_json=False, backup_id=second_backup_id) + + # check correctness + if compressed: + first_size = first_out['data-bytes'] + second_size = second_out['data-bytes'] + else: + first_size = first_out['uncompressed-bytes'] + second_size = second_out['uncompressed-bytes'] + if fs_backup_class.is_file_based: + local_label_size = self.get_backup_label_size(first_backup_id, instance_name) + remote_label_size = self.get_backup_label_size(second_backup_id, instance_name) + # If the full sizes differ, compare them again without the backup_label file + self.assertTrue(first_size == second_size or + first_size - local_label_size == second_size - remote_label_size) + self.assertAlmostEqual(int(local_label_size), int(remote_label_size), delta=2) + else: + self.assertAlmostEqual(int(first_size), int(second_size), delta=2) diff --git a/tests/time_consuming_test.py b/tests/time_consuming_test.py index c0038c085..3da2208db 100644 --- a/tests/time_consuming_test.py +++ b/tests/time_consuming_test.py @@ -5,7 +5,7 @@ from time import sleep -class TimeConsumingTests(ProbackupTest, unittest.TestCase): +class TimeConsumingTests(ProbackupTest): def test_pbckp150(self): """ https://jira.postgrespro.ru/browse/PBCKP-150 @@ -19,11 +19,9 @@ def test_pbckp150(self): if not self.ptrack: self.skipTest('Skipped because ptrack support is disabled') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), + node = self.pg_node.make_simple('node', set_replication=True, ptrack_enable=self.ptrack, - initdb_params=['--data-checksums'], pg_options={ 'max_connections': 100, 'log_statement': 'none', @@ -32,14 +30,13 @@ def test_pbckp150(self): 'ptrack.map_size': 1}) if node.major_version >= 13: - self.set_auto_conf(node, {'wal_keep_size': '16000MB'}) + node.set_auto_conf({'wal_keep_size': '16000MB'}) else: - self.set_auto_conf(node, {'wal_keep_segments': '1000'}) + node.set_auto_conf({'wal_keep_segments': '1000'}) # init probackup and add an instance - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) # run the node and init ptrack node.slow_start() @@ -48,8 +45,8 @@ def test_pbckp150(self): node.pgbench_init(scale=5) # FULL backup followed by PTRACK backup - self.backup_node(backup_dir, 'node', node, options=['--stream']) - self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + self.pb.backup_node('node', node, options=['--stream']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream']) # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel nBenchDuration = 30 @@ -61,7 +58,7 @@ def test_pbckp150(self): # several PTRACK backups for i in range(nBenchDuration): print("[{}] backing up PTRACK diff...".format(i+1)) - self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) + self.pb.backup_node('node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) sleep(0.1) # if the activity pgbench has finished, stop backing up if pgbench.poll() is not None: @@ -72,6 +69,6 @@ def test_pbckp150(self): pgbench.wait() pgbenchval.wait() - backups = self.show_pb(backup_dir, 'node')
+ backups = self.pb.show('node') for b in backups: self.assertEqual("OK", b['status']) diff --git a/tests/time_stamp_test.py b/tests/time_stamp_test.py index 170c62cd4..7398556f9 100644 --- a/tests/time_stamp_test.py +++ b/tests/time_stamp_test.py @@ -1,96 +1,78 @@ import os import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest import subprocess from time import sleep -class TimeStamp(ProbackupTest, unittest.TestCase): +class TimeStamp(ProbackupTest): def test_start_time_format(self): """Test backup ID changing after start-time editing in backup.control. We should convert local time in UTC format""" # Create simple node - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) - + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + backup_dir = self.backup_dir + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.start() - backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream', '-j 2']) - show_backup = self.show_pb(backup_dir, 'node') - - i = 0 - while i < 2: - with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "r+") as f: - output = "" - for line in f: - if line.startswith('start-time') is True: - if i == 0: - output = output + str(line[:-5])+'+00\''+'\n' - else: - output = output + str(line[:-5]) + '\'' + '\n' + backup_id = self.pb.backup_node('node', node, options=['--stream', '-j 2']) + show_backup = self.pb.show('node') + + for i in range(2): + with self.modify_backup_control(backup_dir, 'node', backup_id) as cf: + lines = cf.data.splitlines(keepends=True) + for j, line in enumerate(lines): + if not line.startswith('start-time'): + continue + if i == 0: + lines[j] = line[:-5] + "+00'\n" else: - output = output + str(line) - f.close() - - with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "w") as fw: - fw.write(output) - fw.flush() - show_backup = show_backup + self.show_pb(backup_dir, 'node') - i += 1 + lines[j] = line[:-5] + "'\n" + cf.data = "".join(lines) + show_backup = show_backup + self.pb.show('node') print(show_backup[1]['id']) print(show_backup[2]['id']) self.assertTrue(show_backup[1]['id'] == show_backup[2]['id'], "ERROR: Localtime format using instead of UTC") - output = self.show_pb(backup_dir, as_json=False, as_text=True) + output = self.pb.show(as_json=False, as_text=True) self.assertNotIn("backup ID in control file", output) node.stop() def test_server_date_style(self): """Issue #112""" - node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), + node = self.pg_node.make_simple('node', set_replication=True, - initdb_params=['--data-checksums'], pg_options={"datestyle": "GERMAN, DMY"}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.start() - self.backup_node( - backup_dir, 'node', node, options=['--stream', '-j 2']) + self.pb.backup_node('node', node, options=['--stream', '-j 2']) def test_handling_of_TZ_env_variable(self): """Issue #284""" - node = 
self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.start() my_env = os.environ.copy() my_env["TZ"] = "America/Detroit" - self.backup_node( - backup_dir, 'node', node, options=['--stream', '-j 2'], env=my_env) + self.pb.backup_node('node', node, options=['--stream', '-j 2'], env=my_env) - output = self.show_pb(backup_dir, 'node', as_json=False, as_text=True, env=my_env) + output = self.pb.show('node', as_json=False, as_text=True, env=my_env) self.assertNotIn("backup ID in control file", output) @@ -98,14 +80,11 @@ def test_handling_of_TZ_env_variable(self): # @unittest.expectedFailure def test_dst_timezone_handling(self): """for manual testing""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() print(subprocess.Popen( @@ -124,7 +103,7 @@ def test_dst_timezone_handling(self): stderr=subprocess.PIPE).communicate() # FULL - output = self.backup_node(backup_dir, 'node', node, return_id=False) + output = self.pb.backup_node('node', node, return_id=False) self.assertNotIn("backup ID in control file", output) # move to dst @@ -134,8 +113,7 @@ def test_dst_timezone_handling(self): stderr=subprocess.PIPE).communicate() # DELTA - output = self.backup_node( - backup_dir, 'node', node, backup_type='delta', return_id=False) + output = self.pb.backup_node('node', node, backup_type='delta', return_id=False) self.assertNotIn("backup ID in control file", output) subprocess.Popen( @@ -144,9 +122,9 @@ def test_dst_timezone_handling(self): stderr=subprocess.PIPE).communicate() # DELTA - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - output = self.show_pb(backup_dir, as_json=False, as_text=True) + output = self.pb.show(as_json=False, as_text=True) self.assertNotIn("backup ID in control file", output) subprocess.Popen( @@ -156,9 +134,9 @@ def test_dst_timezone_handling(self): sleep(10) - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - output = self.show_pb(backup_dir, as_json=False, as_text=True) + output = self.pb.show(as_json=False, as_text=True) self.assertNotIn("backup ID in control file", output) subprocess.Popen( @@ -169,14 +147,11 @@ def test_dst_timezone_handling(self): @unittest.skip("skip") def test_dst_timezone_handling_backward_compatibilty(self): """for manual testing""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + 
+ self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() subprocess.Popen( @@ -195,7 +170,7 @@ def test_dst_timezone_handling_backward_compatibilty(self): stderr=subprocess.PIPE).communicate() # FULL - self.backup_node(backup_dir, 'node', node, old_binary=True, return_id=False) + self.pb.backup_node('node', node, old_binary=True, return_id=False) # move to dst subprocess.Popen( @@ -204,8 +179,7 @@ def test_dst_timezone_handling_backward_compatibilty(self): stderr=subprocess.PIPE).communicate() # DELTA - output = self.backup_node( - backup_dir, 'node', node, backup_type='delta', old_binary=True, return_id=False) + output = self.pb.backup_node('node', node, backup_type='delta', old_binary=True, return_id=False) subprocess.Popen( ['sudo', 'timedatectl', 'set-time', '2020-12-01 12:00:00'], @@ -213,9 +187,9 @@ def test_dst_timezone_handling_backward_compatibilty(self): stderr=subprocess.PIPE).communicate() # DELTA - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - output = self.show_pb(backup_dir, as_json=False, as_text=True) + output = self.pb.show(as_json=False, as_text=True) self.assertNotIn("backup ID in control file", output) subprocess.Popen( @@ -225,9 +199,9 @@ def test_dst_timezone_handling_backward_compatibilty(self): sleep(10) - self.backup_node(backup_dir, 'node', node, backup_type='delta') + self.pb.backup_node('node', node, backup_type='delta') - output = self.show_pb(backup_dir, as_json=False, as_text=True) + output = self.pb.show(as_json=False, as_text=True) self.assertNotIn("backup ID in control file", output) subprocess.Popen( diff --git a/tests/validate_test.py b/tests/validate_test.py index 4ff44941f..3b97171d4 100644 --- a/tests/validate_test.py +++ b/tests/validate_test.py @@ -1,15 +1,17 @@ import os -import unittest -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import ProbackupTest +from pg_probackup2.gdb import needs_gdb from datetime import datetime, timedelta from pathlib import Path -import subprocess -from sys import exit import time import hashlib -class ValidateTest(ProbackupTest, unittest.TestCase): +class ValidateTest(ProbackupTest): + + def setUp(self): + super().setUp() + self.test_env["PGPROBACKUP_TESTS_SKIP_HIDDEN"] = "ON" # @unittest.skip("skip") # @unittest.expectedFailure @@ -17,14 +19,11 @@ def test_basic_validate_nullified_heap_page_backup(self): """ make node with nullified heap block """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) @@ -43,26 +42,27 @@ def test_basic_validate_nullified_heap_page_backup(self): f.seek(8192) f.write(b"\x00"*8192) f.flush() - f.close - self.backup_node( - backup_dir, 'node', node, options=['--log-level-file=verbose']) + self.pb.backup_node( + 'node', node, options=['--log-level-file=verbose']) pgdata = self.pgdata_content(node.data_dir) + log_content = self.read_pb_log() + self.assertIn( + 'File: {0} blknum 1, empty zeroed page'.format(file_path), + log_content, + 'Failed to detect 
nullified block') if not self.remote: - log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log") - with open(log_file_path) as f: - log_content = f.read() - self.assertIn( + self.assertIn( 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()), log_content, 'Failed to detect nullified block') - self.validate_pb(backup_dir, options=["-j", "4"]) + self.pb.validate(options=["-j", "4"]) node.cleanup() - self.restore_node(backup_dir, 'node', node) + self.pb.restore_node('node', node=node) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -74,14 +74,12 @@ def test_validate_wal_unreal_values(self): make node with archiving, make archive backup validate to both real and unreal values """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + instance_name = 'node' + node = self.pg_node.make_simple(instance_name) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) node.slow_start() node.pgbench_init(scale=3) @@ -89,60 +87,34 @@ def test_validate_wal_unreal_values(self): con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node(instance_name, node) node.pgbench_init(scale=3) - target_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] + target_time = self.pb.show( + instance_name, backup_id)['recovery-time'] after_backup_time = datetime.now().replace(second=0, microsecond=0) # Validate to real time - self.assertIn( - "INFO: Backup validation completed successfully", - self.validate_pb( - backup_dir, 'node', - options=["--time={0}".format(target_time), "-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + validate_result = self.pb.validate(instance_name, options=[f"--recovery-target-time={target_time}", "-j", "4"]) + self.assertMessage(validate_result, contains="INFO: Backup validation completed successfully") # Validate to unreal time unreal_time_1 = after_backup_time - timedelta(days=2) - try: - self.validate_pb( - backup_dir, 'node', options=["--time={0}".format( - unreal_time_1), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of validation to unreal time.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup satisfying target options is not found', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = self.pb.validate(instance_name, + options=[f"--time={unreal_time_1}", "-j", "4"], + expect_error=True) + + self.assertMessage(error_result, contains='ERROR: Backup satisfying target options is not found') # Validate to unreal time #2 unreal_time_2 = after_backup_time + timedelta(days=2) - try: - self.validate_pb( - backup_dir, 'node', - options=["--time={0}".format(unreal_time_2), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of validation to unreal time.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Not enough WAL records to time' in e.message, - '\n Unexpected Error 
Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = self.pb.validate(instance_name, + options=["--time={0}".format(unreal_time_2), "-j", "4"], + expect_error=True) + self.assertMessage(error_result, contains='ERROR: Not enough WAL records to time') # Validate to real xid - target_xid = None with node.connect("postgres") as con: res = con.execute( "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") @@ -151,59 +123,25 @@ def test_validate_wal_unreal_values(self): self.switch_wal_segment(node) time.sleep(5) - self.assertIn( - "INFO: Backup validation completed successfully", - self.validate_pb( - backup_dir, 'node', options=["--xid={0}".format(target_xid), - "-j", "4"]), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + output = self.pb.validate(instance_name, + options=["--xid={0}".format(target_xid), "-j", "4"]) + self.assertMessage(output, contains="INFO: Backup validation completed successfully") # Validate to unreal xid unreal_xid = int(target_xid) + 1000 - try: - self.validate_pb( - backup_dir, 'node', options=["--xid={0}".format(unreal_xid), - "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of validation to unreal xid.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Not enough WAL records to xid' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = self.pb.validate(instance_name, + options=["--xid={0}".format(unreal_xid), "-j", "4"], + expect_error=True) + self.assertMessage(error_result, contains='ERROR: Not enough WAL records to xid') # Validate with backup ID - output = self.validate_pb(backup_dir, 'node', backup_id, + output = self.pb.validate(instance_name, backup_id, options=["-j", "4"]) - self.assertIn( - "INFO: Validating backup {0}".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Backup {0} data files are valid".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Backup {0} WAL segments are valid".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Backup {0} is valid".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) - self.assertIn( - "INFO: Validate of backup {0} completed".format(backup_id), - output, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + self.assertMessage(output, contains=f"INFO: Validating backup {backup_id}") + self.assertMessage(output, contains=f"INFO: Backup {backup_id} data files are valid") + self.assertMessage(output, contains=f"INFO: Backup {backup_id} WAL segments are valid") + self.assertMessage(output, contains=f"INFO: Backup {backup_id} is valid") + self.assertMessage(output, contains=f"INFO: Validate of backup {backup_id} completed") # @unittest.skip("skip") def test_basic_validate_corrupted_intermediate_backup(self): @@ -213,18 +151,17 @@ def test_basic_validate_corrupted_intermediate_backup(self): run validate on PAGE1, expect PAGE1 to gain status CORRUPT and PAGE2 gain status ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + 
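# [Editor's sketch] The expect_error/assertMessage pattern used throughout the rewritten
# validate tests replaces the old try/except ProbackupException blocks. The snippet below
# is not the pg_probackup2 implementation, only an illustration of the idea under the
# assumption that the wrapper captures command output: with expect_error the call must
# fail and its output is returned for substring checks, and assertMessage falls back to
# the last captured output when no explicit result is passed.
import subprocess
import unittest


class CliAssertionsSketch(unittest.TestCase):
    last_output = ''

    def run_pb(self, argv, expect_error=False):
        res = subprocess.run(argv, capture_output=True, text=True)
        self.last_output = res.stdout + res.stderr
        if expect_error:
            self.assertNotEqual(0, res.returncode, 'command succeeded but an error was expected')
        else:
            self.assertEqual(0, res.returncode, self.last_output)
        return self.last_output

    def assertMessage(self, output=None, *, contains):
        self.assertIn(contains, output if output is not None else self.last_output)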
instance_name = 'node' + node = self.pg_node.make_simple(instance_name) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) node.slow_start() # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_full = self.pb.backup_node(instance_name, node) node.safe_psql( "postgres", @@ -235,8 +172,8 @@ def test_basic_validate_corrupted_intermediate_backup(self): "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_page = self.pb.backup_node( + instance_name, node, backup_type='page') node.safe_psql( "postgres", @@ -244,47 +181,25 @@ def test_basic_validate_corrupted_intermediate_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(10000,20000) i") # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_page_2 = self.pb.backup_node( + instance_name, node, backup_type='page') # Corrupt some file - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id_2, 'database', file_path) - with open(file, "r+b", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, instance_name, backup_id_page, + f'database/{file_path}', damage=(42, b"blah")) # Simple validate - try: - self.validate_pb( - backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_2) in e.message and - 'ERROR: Backup {0} is corrupt'.format( - backup_id_2) in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_result = self.pb.validate(instance_name, backup_id=backup_id_page, + options=["-j", "4"], expect_error=True) + self.assertMessage(error_result, contains=f'INFO: Validating parents for backup {backup_id_page}') + self.assertMessage(error_result, contains=f'ERROR: Backup {backup_id_page} is corrupt') + self.assertMessage(error_result, contains=f'WARNING: Backup {backup_id_page} data files are corrupted') - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], - 'Backup STATUS should be "CORRUPT"') - self.assertEqual( - 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], - 'Backup STATUS should be "ORPHAN"') + page_backup_status = self.pb.show(instance_name, backup_id_page)['status'] + self.assertEqual('CORRUPT', page_backup_status, 'Backup STATUS should be "CORRUPT"') + + second_page_backup_status = self.pb.show(instance_name, backup_id_page_2)['status'] + self.assertEqual('ORPHAN', second_page_backup_status, 'Backup STATUS should be "ORPHAN"') # @unittest.skip("skip") def test_validate_corrupted_intermediate_backups(self): @@ -294,14 +209,13 @@ def test_validate_corrupted_intermediate_backups(self): expect FULL and PAGE1 to gain status CORRUPT and PAGE2 gain status ORPHAN """ - node = self.make_simple_node( - 
base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + instance_name = 'node' + node = self.pg_node.make_simple(instance_name) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) node.slow_start() node.safe_psql( @@ -313,7 +227,7 @@ def test_validate_corrupted_intermediate_backups(self): "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node(instance_name, node) node.safe_psql( "postgres", @@ -324,8 +238,8 @@ def test_validate_corrupted_intermediate_backups(self): "postgres", "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip() # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( + instance_name, node, backup_type='page') node.safe_psql( "postgres", @@ -333,74 +247,39 @@ def test_validate_corrupted_intermediate_backups(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(20000,30000) i") # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + instance_name, node, backup_type='page') # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', - backup_id_1, 'database', file_path_t_heap) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, instance_name, backup_id_1, + f'database/{file_path_t_heap}', + damage=(84, b"blah")) # Corrupt some file in PAGE1 backup - file_page1 = os.path.join( - backup_dir, 'backups', 'node', - backup_id_2, 'database', file_path_t_heap_1) - with open(file_page1, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, instance_name, backup_id_2, + f'database/{file_path_t_heap_1}', + damage=(42, b"blah")) # Validate PAGE1 - try: - self.validate_pb( - backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n ' - 'CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his parent'.format( - backup_id_2) in e.message and - 'WARNING: Backup {0} is orphaned because his parent'.format( - backup_id_3) in e.message and - 'ERROR: Backup {0} is orphan.'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual( - 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], + error_result = 
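# [Editor's sketch] corrupt_backup_file() condenses the removed open/seek/write blocks
# into one call with a damage=(offset, garbage) tuple. A minimal file-based sketch is
# shown below; the real helper belongs to the test framework and also has to cope with
# non file-based backup storage, so treat the path layout as an assumption carried over
# from the removed code.
import os


def corrupt_backup_file(backup_dir, instance, backup_id, relpath, *, damage):
    offset, garbage = damage
    path = os.path.join(str(backup_dir), 'backups', instance, backup_id, relpath)
    with open(path, 'r+b') as f:  # patch bytes in place without truncating the file
        f.seek(offset)
        f.write(garbage)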
self.pb.validate(instance_name, backup_id=backup_id_2, + options=["-j", "4"], expect_error=True) + self.assertMessage(error_result, contains=f'INFO: Validating parents for backup {backup_id_2}') + self.assertMessage(error_result, contains=f'INFO: Validating backup {backup_id_1}') + self.assertMessage(error_result, contains=f'WARNING: Invalid CRC of backup file') + self.assertMessage(error_result, contains=f'WARNING: Backup {backup_id_1} data files are corrupted') + self.assertMessage(error_result, contains=f'WARNING: Backup {backup_id_2} is orphaned because his parent') + self.assertMessage(error_result, contains=f'WARNING: Backup {backup_id_3} is orphaned because his parent') + self.assertMessage(error_result, contains=f'ERROR: Backup {backup_id_2} is orphan.') + self.assertEqual('CORRUPT', + self.pb.show(instance_name, backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], + self.pb.show(instance_name, backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], + self.pb.show(instance_name, backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') # @unittest.skip("skip") @@ -412,79 +291,49 @@ def test_validate_specific_error_intermediate_backups(self): purpose of this test is to be sure that not only CORRUPT backup descendants can be orphanized """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + instance_name = 'node' + node = self.pg_node.make_simple(instance_name) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) node.slow_start() # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node(instance_name, node) # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( + instance_name, node, backup_type='page') # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + instance_name, node, backup_type='page') # Change FULL backup status to ERROR - control_path = os.path.join( - backup_dir, 'backups', 'node', backup_id_1, 'backup.control') - - with open(control_path, 'r') as f: - actual_control = f.read() - - new_control_file = '' - for line in actual_control.splitlines(): - new_control_file += line.replace( - 'status = OK', 'status = ERROR') - new_control_file += '\n' - - with open(control_path, 'wt') as f: - f.write(new_control_file) - f.flush() - f.close() + self.change_backup_status(self.backup_dir, instance_name, backup_id_1, 'ERROR') # Validate PAGE1 - try: - self.validate_pb( - backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: ERROR'.format( - backup_id_2, backup_id_1) in e.message and - 'INFO: Validating parents for backup {0}'.format( - backup_id_2) in e.message and - 'WARNING: Backup {0} has 
status ERROR. Skip validation.'.format( - backup_id_1) and - 'ERROR: Backup {0} is orphan.'.format(backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n ' - 'CMD: {1}'.format( - repr(e.message), self.cmd)) + error_message = self.pb.validate(instance_name, backup_id=backup_id_2, options=["-j", "4"], + expect_error=True) + self.assertMessage(error_message, contains=f'WARNING: Backup {backup_id_2} is orphaned because his parent {backup_id_1} has status: ERROR') + self.assertMessage(error_message, contains=f'INFO: Validating parents for backup {backup_id_2}') + self.assertMessage(error_message, contains=f'WARNING: Backup {backup_id_1} has status ERROR. Skip validation.') + self.assertMessage(error_message, contains=f'ERROR: Backup {backup_id_2} is orphan.') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], + self.pb.show(instance_name, backup_id_1)['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], + self.pb.show(instance_name, backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], + self.pb.show(instance_name, backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') # @unittest.skip("skip") @@ -496,75 +345,45 @@ def test_validate_error_intermediate_backups(self): purpose of this test is to be sure that not only CORRUPT backup descendants can be orphanized """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + instance_name = 'node' + node = self.pg_node.make_simple(instance_name) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance(instance_name, node) + self.pb.set_archiving(instance_name, node) node.slow_start() # FULL - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node(instance_name, node) # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( + instance_name, node, backup_type='page') # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + instance_name, node, backup_type='page') # Change FULL backup status to ERROR - control_path = os.path.join( - backup_dir, 'backups', 'node', backup_id_1, 'backup.control') - - with open(control_path, 'r') as f: - actual_control = f.read() - - new_control_file = '' - for line in actual_control.splitlines(): - new_control_file += line.replace( - 'status = OK', 'status = ERROR') - new_control_file += '\n' - - with open(control_path, 'wt') as f: - f.write(new_control_file) - f.flush() - f.close() + self.change_backup_status(self.backup_dir, instance_name, backup_id_1, 'ERROR') # Validate instance - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because backup has status ERROR.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "WARNING: Backup {0} is orphaned because " - "his parent {1} has status: ERROR".format( - backup_id_2, backup_id_1) in e.message and -
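# [Editor's sketch] change_backup_status() stands in for the removed loop that rewrote
# backup.control line by line. The real helper is part of the test framework; this is
# only a file-based illustration of the same effect (swap the status line for the
# requested value).
import os
import re


def change_backup_status(backup_dir, instance, backup_id, new_status):
    path = os.path.join(str(backup_dir), 'backups', instance, backup_id, 'backup.control')
    with open(path) as f:
        content = f.read()
    content = re.sub(r'^status = \w+$', f'status = {new_status}', content, flags=re.M)
    with open(path, 'w') as f:
        f.write(content)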
'WARNING: Backup {0} has status ERROR. Skip validation'.format( - backup_id_1) in e.message and - "WARNING: Some backups are not valid" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + error_message = self.pb.validate(options=["-j", "4"], expect_error=True) + self.assertMessage(error_message, contains=f'WARNING: Backup {backup_id_2} is orphaned because his parent {backup_id_1} has status: ERROR') + self.assertMessage(error_message, contains=f'WARNING: Backup {backup_id_1} has status ERROR. Skip validation') self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], + self.pb.show(instance_name, backup_id_1)['status'], 'Backup STATUS should be "ERROR"') self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], + self.pb.show(instance_name, backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( 'ORPHAN', - self.show_pb(backup_dir, 'node', backup_id_3)['status'], + self.pb.show(instance_name, backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') # @unittest.skip("skip") @@ -575,18 +394,16 @@ def test_validate_corrupted_intermediate_backups_1(self): expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to gain status ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node('node', node) # PAGE1 node.safe_psql( @@ -594,8 +411,8 @@ def test_validate_corrupted_intermediate_backups_1(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE2 node.safe_psql( @@ -606,8 +423,8 @@ def test_validate_corrupted_intermediate_backups_1(self): file_page_2 = node.safe_psql( "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE3 node.safe_psql( @@ -615,8 +432,8 @@ def test_validate_corrupted_intermediate_backups_1(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(10000,20000) i") - backup_id_4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_4 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE4 node.safe_psql( @@ -624,8 +441,8 @@ def test_validate_corrupted_intermediate_backups_1(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(20000,30000) i") - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE5 node.safe_psql( @@ -636,8 +453,8 @@ def 
test_validate_corrupted_intermediate_backups_1(self): file_page_5 = node.safe_psql( "postgres", "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() - backup_id_6 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_6 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE6 node.safe_psql( @@ -645,119 +462,64 @@ def test_validate_corrupted_intermediate_backups_1(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(30000,40000) i") - backup_id_7 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_7 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL2 - backup_id_8 = self.backup_node(backup_dir, 'node', node) + backup_id_8 = self.pb.backup_node('node', node) # Corrupt some file in PAGE2 and PAGE5 backups - file_page1 = os.path.join( - backup_dir, 'backups', 'node', backup_id_3, 'database', file_page_2) - with open(file_page1, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_3, + f'database/{file_page_2}', + damage=(84, b"blah")) - file_page4 = os.path.join( - backup_dir, 'backups', 'node', backup_id_6, 'database', file_page_5) - with open(file_page4, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_6, + f'database/{file_page_5}', + damage=(42, b"blah")) # Validate PAGE3 - try: - self.validate_pb( - backup_dir, 'node', - backup_id=backup_id_4, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_2) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_3) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_4, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_5, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_6, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - 
self.assertTrue( - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_7, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'ERROR: Backup {0} is orphan'.format(backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n ' - 'CMD: {1}'.format(repr(e.message), self.cmd)) + self.pb.validate('node', + backup_id=backup_id_4, options=["-j", "4"], + expect_error="because of data files corruption") + + self.assertMessage(contains=f'INFO: Validating parents for backup {backup_id_4}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_1}') + self.assertMessage(contains=f'INFO: Backup {backup_id_1} data files are valid') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_2}') + self.assertMessage(contains=f'INFO: Backup {backup_id_2} data files are valid') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_3}') + self.assertMessage(contains=f'WARNING: Invalid CRC of backup file') + self.assertMessage(contains=f'WARNING: Backup {backup_id_3} data files are corrupted') + self.assertMessage(contains=f'WARNING: Backup {backup_id_4} is orphaned because his parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'WARNING: Backup {backup_id_5} is orphaned because his parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'WARNING: Backup {backup_id_6} is orphaned because his parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'WARNING: Backup {backup_id_7} is orphaned because his parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'ERROR: Backup {backup_id_4} is orphan') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'OK', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'OK', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'CORRUPT', self.pb.show('node', backup_id_3)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'ORPHAN', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'ORPHAN', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'ORPHAN', self.pb.show('node', backup_id_6)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], + 'ORPHAN', self.pb.show('node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], + 'OK', self.pb.show('node', backup_id_8)['status'], 'Backup STATUS should be "OK"') # @unittest.skip("skip") @@ -768,18 +530,16 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to gain status ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), -
initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node('node', node) # PAGE1 node.safe_psql( @@ -787,8 +547,8 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE2 node.safe_psql( @@ -799,8 +559,8 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): file_page_2 = node.safe_psql( "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE3 node.safe_psql( @@ -808,8 +568,8 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(10000,20000) i") - backup_id_4 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_4 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE4 node.safe_psql( @@ -824,8 +584,8 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(30001, 30001) i RETURNING (xmin)").decode('utf-8').rstrip() - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE5 node.safe_psql( @@ -836,8 +596,8 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): file_page_5 = node.safe_psql( "postgres", "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() - backup_id_6 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_6 = self.pb.backup_node( + 'node', node, backup_type='page') # PAGE6 node.safe_psql( @@ -845,108 +605,51 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(30000,40000) i") - backup_id_7 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_7 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL2 - backup_id_8 = self.backup_node(backup_dir, 'node', node) + backup_id_8 = self.pb.backup_node('node', node) # Corrupt some file in PAGE2 and PAGE5 backups - file_page1 = os.path.join( - backup_dir, 'backups', 'node', - backup_id_3, 'database', file_page_2) - with open(file_page1, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - file_page4 = os.path.join( - backup_dir, 'backups', 'node', - backup_id_6, 'database', file_page_5) - with open(file_page4, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_3, 
+ f'database/{file_page_2}', + damage=(84, b"blah")) + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_6, + f'database/{file_page_5}', + damage=(42, b"blah")) # Validate PAGE3 - try: - self.validate_pb( - backup_dir, 'node', - options=[ - '-i', backup_id_4, '--xid={0}'.format(target_xid), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating parents for backup {0}'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_2) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_3) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_4, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_5, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_6, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Backup {0} is orphaned because his ' - 'parent {1} has status: CORRUPT'.format( - backup_id_7, backup_id_3) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'ERROR: Backup {0} is orphan'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "CORRUPT"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', 
backup_id_8)['status'], 'Backup STATUS should be "OK"') + self.pb.validate('node', + options=['-i', backup_id_4, '--xid', target_xid, "-j", "4"], + expect_error="because of data files corruption") + + self.assertMessage(contains=f'INFO: Validating parents for backup {backup_id_4}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_1}') + self.assertMessage(contains=f'INFO: Backup {backup_id_1} data files are valid') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_2}') + self.assertMessage(contains=f'INFO: Backup {backup_id_2} data files are valid') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_3}') + self.assertMessage(contains='WARNING: Invalid CRC of backup file') + self.assertMessage(contains=f'WARNING: Backup {backup_id_3} data files are corrupted') + self.assertMessage(contains=f'WARNING: Backup {backup_id_4} is orphaned because his ' + f'parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'WARNING: Backup {backup_id_5} is orphaned because his ' + f'parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'WARNING: Backup {backup_id_6} is orphaned because his ' + f'parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'WARNING: Backup {backup_id_7} is orphaned because his ' + f'parent {backup_id_3} has status: CORRUPT') + self.assertMessage(contains=f'ERROR: Backup {backup_id_4} is orphan') + + self.assertEqual('OK', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('OK', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('CORRUPT', self.pb.show('node', backup_id_3)['status'], 'Backup STATUS should be "CORRUPT"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_6)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('OK', self.pb.show('node', backup_id_8)['status'], 'Backup STATUS should be "OK"') # @unittest.skip("skip") def test_validate_instance_with_several_corrupt_backups(self): @@ -956,25 +659,23 @@ def test_validate_instance_with_several_corrupt_backups(self): expect FULL1 to gain status CORRUPT, PAGE1_1 to gain status ORPHAN FULL2 to gain status CORRUPT, PAGE2_1 to gain status ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "create table t_heap as select generate_series(0,1) i") # FULL1 - backup_id_1 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) + backup_id_1 = self.pb.backup_node( + 'node', node, options=['--no-validate']) # FULL2 - backup_id_2 = self.backup_node(backup_dir, 'node', node) + backup_id_2 = self.pb.backup_node('node', node) rel_path = node.safe_psql( "postgres", "select 
pg_relation_filepath('t_heap')").decode('utf-8').rstrip() @@ -983,94 +684,78 @@ def test_validate_instance_with_several_corrupt_backups(self): "postgres", "insert into t_heap values(2)") - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL3 - backup_id_4 = self.backup_node(backup_dir, 'node', node) + backup_id_4 = self.pb.backup_node('node', node) node.safe_psql( "postgres", "insert into t_heap values(3)") - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL4 - backup_id_6 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) + backup_id_6 = self.pb.backup_node( + 'node', node, options=['--no-validate']) # Corrupt some files in FULL2 and FULL3 backup - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_2, - 'database', rel_path)) - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_4, - 'database', rel_path)) + self.remove_backup_file(self.backup_dir, 'node', backup_id_2, + f'database/{rel_path}') + self.remove_backup_file(self.backup_dir, 'node', backup_id_4, + f'database/{rel_path}') # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "INFO: Validate backups of the instance 'node'" in e.message, - "\n Unexpected Error Message: {0}\n " - "CMD: {1}".format(repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Some backups are not valid' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', options=["-j", "4", "--log-level-file=LOG"], + expect_error="because of data files corruption") + self.assertMessage(contains="INFO: Validate backups of the instance 'node'") + self.assertMessage(contains='WARNING: Some backups are not valid') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'OK', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'CORRUPT', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'ORPHAN', self.pb.show('node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'CORRUPT', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'ORPHAN', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'OK', self.pb.show('node', backup_id_6)['status'], 'Backup STATUS should be "OK"') # @unittest.skip("skip") + @needs_gdb def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly """ - self._check_gdb_flag_or_skip_test() - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - 
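# [Editor's sketch] remove_backup_file() above replaces the bare os.remove() calls on
# paths assembled by hand. For a plain on-disk catalog it could be as small as this;
# the storage-agnostic version in the framework is assumed, not shown here.
import os


def remove_backup_file(backup_dir, instance, backup_id, relpath):
    os.remove(os.path.join(str(backup_dir), 'backups', instance, backup_id, relpath))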
initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( "postgres", "create table t_heap as select generate_series(0,1) i") # FULL1 - backup_id_1 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) + backup_id_1 = self.pb.backup_node( + 'node', node, options=['--no-validate']) # FULL2 - backup_id_2 = self.backup_node(backup_dir, 'node', node) + backup_id_2 = self.pb.backup_node('node', node) rel_path = node.safe_psql( "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() @@ -1079,65 +764,60 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): "postgres", "insert into t_heap values(2)") - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL3 - backup_id_4 = self.backup_node(backup_dir, 'node', node) + backup_id_4 = self.pb.backup_node('node', node) node.safe_psql( "postgres", "insert into t_heap values(3)") - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL4 - backup_id_6 = self.backup_node( - backup_dir, 'node', node, options=['--no-validate']) + backup_id_6 = self.pb.backup_node( + 'node', node, options=['--no-validate']) # Corrupt some files in FULL2 and FULL3 backup - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_1, - 'database', rel_path)) - os.remove(os.path.join( - backup_dir, 'backups', 'node', backup_id_3, - 'database', rel_path)) + self.remove_backup_file(self.backup_dir, 'node', backup_id_1, + f'database/{rel_path}') + self.remove_backup_file(self.backup_dir, 'node', backup_id_3, + f'database/{rel_path}') # Validate Instance - gdb = self.validate_pb( - backup_dir, 'node', options=["-j", "4", "--log-level-file=LOG"], gdb=True) + gdb = self.pb.validate( + 'node', options=["-j", "4", "--log-level-file=LOG"], gdb=True) gdb.set_breakpoint('validate_file_pages') gdb.run_until_break() gdb.continue_execution_until_break() - gdb.remove_all_breakpoints() - gdb._execute('signal SIGINT') + gdb.signal('SIGINT') gdb.continue_execution_until_error() self.assertEqual( - 'DONE', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'DONE', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'OK', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'OK', self.pb.show('node', backup_id_3)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'OK', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'OK', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'DONE', self.show_pb(backup_dir, 'node', backup_id_6)['status'], + 'DONE', self.pb.show('node', backup_id_6)['status'], 'Backup STATUS should 
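# [Editor's sketch] The @needs_gdb decorator replaces the explicit
# self._check_gdb_flag_or_skip_test() call at the top of gdb-driven tests. The decorator
# actually used comes from pg_probackup2.gdb; the version below is only a guess at its
# behaviour (skip unless gdb tests are explicitly enabled), and the environment flag
# name is an assumption.
import os
import functools
import unittest


def needs_gdb(test_method):
    @functools.wraps(test_method)
    def wrapper(self, *args, **kwargs):
        if os.environ.get('PGPROBACKUP_GDB') != 'ON':  # assumed flag name
            raise unittest.SkipTest('gdb tests are disabled; set PGPROBACKUP_GDB=ON to run them')
        return test_method(self, *args, **kwargs)
    return wrapper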
be "OK"') - log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') - with open(log_file, 'r') as f: - log_content = f.read() - self.assertNotIn( + log_content = self.read_pb_log() + self.assertNotIn( 'Interrupted while locking backup', log_content) # @unittest.skip("skip") @@ -1147,14 +827,12 @@ def test_validate_instance_with_corrupted_page(self): corrupt file in PAGE1 backup and run validate on instance, expect PAGE1 to gain status CORRUPT, PAGE2 to gain status ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1163,7 +841,7 @@ def test_validate_instance_with_corrupted_page(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -1174,8 +852,8 @@ def test_validate_instance_with_corrupted_page(self): "postgres", "select pg_relation_filepath('t_heap1')").decode('utf-8').rstrip() # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( + 'node', node, backup_type='page') node.safe_psql( "postgres", @@ -1183,104 +861,59 @@ def test_validate_instance_with_corrupted_page(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(20000,30000) i") # PAGE2 - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL1 - backup_id_4 = self.backup_node( - backup_dir, 'node', node) + backup_id_4 = self.pb.backup_node( + 'node', node) # PAGE3 - backup_id_5 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node( + 'node', node, backup_type='page') # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', backup_id_2, - 'database', file_path_t_heap1) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_2, + f'database/{file_path_t_heap1}', + damage=(84, b"blah")) # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "INFO: Validate backups of the instance 'node'" in e.message, - "\n Unexpected Error Message: {0}\n " - "CMD: {1}".format(repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_5) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_5) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_5) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_4) in e.message and - 'INFO: Backup {0} data files are valid'.format( - 
backup_id_4) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_4) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_3) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_3) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_3) in e.message and - 'WARNING: Backup {0} is orphaned because ' - 'his parent {1} has status: CORRUPT'.format( - backup_id_3, backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_2) in e.message and - 'WARNING: Invalid CRC of backup file' in e.message and - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id_2) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'INFO: Validating backup {0}'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} data files are valid'.format( - backup_id_1) in e.message and - 'INFO: Backup {0} WAL segments are valid'.format( - backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertTrue( - 'WARNING: Some backups are not valid' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', options=["-j", "4"], + expect_error="because of data files corruption") + self.assertMessage(contains="INFO: Validate backups of the instance 'node'") + self.assertMessage(contains=f'INFO: Validating backup {backup_id_5}') + self.assertMessage(contains=f'INFO: Backup {backup_id_5} data files are valid') + self.assertMessage(contains=f'INFO: Backup {backup_id_5} WAL segments are valid') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_4}') + self.assertMessage(contains=f'INFO: Backup {backup_id_4} data files are valid') + self.assertMessage(contains=f'INFO: Backup {backup_id_4} WAL segments are valid') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_3}') + self.assertMessage(contains=f'INFO: Backup {backup_id_3} data files are valid') + self.assertMessage(contains=f'INFO: Backup {backup_id_3} WAL segments are valid') + + self.assertMessage(contains=f'WARNING: Backup {backup_id_3} is orphaned because ' + f'his parent {backup_id_2} has status: CORRUPT') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_2}') + self.assertMessage(contains='WARNING: Invalid CRC of backup file') + self.assertMessage(contains=f'WARNING: Backup {backup_id_2} data files are corrupted') + + self.assertMessage(contains=f'INFO: Validating backup {backup_id_1}') + self.assertMessage(contains=f'INFO: Backup {backup_id_1} data files are valid') + self.assertMessage(contains=f'INFO: Backup {backup_id_1} WAL segments are valid') + self.assertMessage(contains='WARNING: Some backups are not valid') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'], + 'OK', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_2)['status'], + 'CORRUPT', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( - 'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], + 'ORPHAN', self.pb.show('node', 
backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], + 'OK', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "OK"') self.assertEqual( - 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], + 'OK', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "OK"') # @unittest.skip("skip") @@ -1289,13 +922,12 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): corrupt file in FULL backup and run validate on instance, expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN, try to restore backup with --no-validation option""" - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1307,7 +939,7 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -1315,7 +947,7 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") # PAGE1 - backup_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node('node', node, backup_type='page') # PAGE2 node.safe_psql( @@ -1323,10 +955,10 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(20000,30000) i") - backup_id_3 = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node('node', node, backup_type='page') # FULL1 - backup_id_4 = self.backup_node(backup_dir, 'node', node) + backup_id_4 = self.pb.backup_node('node', node) # PAGE3 node.safe_psql( @@ -1334,59 +966,44 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): "insert into t_heap select i as id, " "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(30000,40000) i") - backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node('node', node, backup_type='page') # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', - backup_id_1, 'database', file_path_t_heap) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_1, + f'database/{file_path_t_heap}', + damage=(84, b"blah")) # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating backup {0}'.format(backup_id_1) in e.message - and "INFO: Validate backups of the instance 'node'" 
in e.message - and 'WARNING: Invalid CRC of backup file' in e.message - and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') + self.pb.validate('node', options=["-j", "4"], + expect_error="because of data files corruption") + self.assertMessage(contains=f'INFO: Validating backup {backup_id_1}') + self.assertMessage(contains="INFO: Validate backups of the instance 'node'") + self.assertMessage(contains='WARNING: Invalid CRC of backup file') + self.assertMessage(contains=f'WARNING: Backup {backup_id_1} data files are corrupted') + + self.assertEqual('CORRUPT', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('OK', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('OK', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "OK"') node.cleanup() - restore_out = self.restore_node( - backup_dir, 'node', node, + restore_out = self.pb.restore_node( + 'node', node, options=["--no-validate"]) - self.assertIn( - "INFO: Restore of backup {0} completed.".format(backup_id_5), - restore_out, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(self.output), self.cmd)) + self.assertMessage(restore_out, contains="INFO: Restore of backup {0} completed.".format(backup_id_5)) # @unittest.skip("skip") def test_validate_instance_with_corrupted_full(self): """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, corrupt file in FULL backup and run validate on instance, expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN""" - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.safe_psql( @@ -1399,7 +1016,7 @@ def test_validate_instance_with_corrupted_full(self): "postgres", "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip() # FULL1 - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -1408,8 +1025,8 @@ def test_validate_instance_with_corrupted_full(self): "from generate_series(0,10000) i") # PAGE1 - backup_id_2 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_2 = self.pb.backup_node( 
+ 'node', node, backup_type='page') # PAGE2 node.safe_psql( @@ -1418,12 +1035,12 @@ def test_validate_instance_with_corrupted_full(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(20000,30000) i") - backup_id_3 = self.backup_node( - backup_dir, 'node', node, backup_type='page') + backup_id_3 = self.pb.backup_node( + 'node', node, backup_type='page') # FULL1 - backup_id_4 = self.backup_node( - backup_dir, 'node', node) + backup_id_4 = self.pb.backup_node( + 'node', node) # PAGE3 node.safe_psql( @@ -1431,115 +1048,84 @@ def test_validate_instance_with_corrupted_full(self): "insert into t_heap select i as id, " "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(30000,40000) i") - backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id_5 = self.pb.backup_node('node', node, backup_type='page') # Corrupt some file in FULL backup - file_full = os.path.join( - backup_dir, 'backups', 'node', - backup_id_1, 'database', file_path_t_heap) - with open(file_full, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id_1, + f'database/{file_path_t_heap}', + damage=(84, b"blah")) # Validate Instance - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data files corruption.\n " - "Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'INFO: Validating backup {0}'.format(backup_id_1) in e.message - and "INFO: Validate backups of the instance 'node'" in e.message - and 'WARNING: Invalid CRC of backup file' in e.message - and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - - self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') - self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') + self.pb.validate('node', options=["-j", "4"], + expect_error="because of data files corruption") + self.assertMessage(contains=f'INFO: Validating backup {backup_id_1}') + self.assertMessage(contains="INFO: Validate backups of the instance 'node'") + self.assertMessage(contains='WARNING: Invalid CRC of backup file') + self.assertMessage(contains=f'WARNING: Backup {backup_id_1} data files are corrupted') + + self.assertEqual('CORRUPT', self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('ORPHAN', self.pb.show('node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') + self.assertEqual('OK', self.pb.show('node', backup_id_4)['status'], 'Backup STATUS should be "OK"') + self.assertEqual('OK', self.pb.show('node', backup_id_5)['status'], 'Backup STATUS should be "OK"') # @unittest.skip("skip") def test_validate_corrupt_wal_1(self): """make archive node, take FULL1, 
PAGE1,PAGE2,FULL2,PAGE3,PAGE4 backups, corrupt all wal files, run validate, expect errors""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id_1 = self.backup_node(backup_dir, 'node', node) + backup_id_1 = self.pb.backup_node('node', node) with node.connect("postgres") as con: con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - backup_id_2 = self.backup_node(backup_dir, 'node', node) + backup_id_2 = self.pb.backup_node('node', node) # Corrupt WAL - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals.sort() - for wal in wals: - with open(os.path.join(wals_dir, wal), "rb+", 0) as f: - f.seek(42) - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close + bla = b"blablablaadssaaaaaaaaaaaaaaa" + for wal in self.get_instance_wal_list(self.backup_dir, 'node'): + self.corrupt_instance_wal(self.backup_dir, 'node', wal, 42, bla) # Simple validate - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segments corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Backup' in e.message and - 'WAL segments are corrupted' in e.message and - "WARNING: There are not enough WAL " - "records to consistenly restore backup" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', options=["-j", "4"], + expect_error="because of wal segments corruption") + self.assertMessage(contains='WARNING: Backup') + self.assertMessage(contains='WAL segments are corrupted') + self.assertMessage(contains="WARNING: There are not enough WAL " + "records to consistenly restore backup") self.assertEqual( 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_1)['status'], + self.pb.show('node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"') self.assertEqual( 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id_2)['status'], + self.pb.show('node', backup_id_2)['status'], 'Backup STATUS should be "CORRUPT"') # @unittest.skip("skip") def test_validate_corrupt_wal_2(self): """make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors""" - node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() with node.connect("postgres") as con: con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) target_xid = None with 
node.connect("postgres") as con: res = con.execute( @@ -1548,41 +1134,22 @@ def test_validate_corrupt_wal_2(self): target_xid = res[0][0] # Corrupt WAL - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals.sort() - for wal in wals: - with open(os.path.join(wals_dir, wal), "rb+", 0) as f: - f.seek(128) - f.write(b"blablablaadssaaaaaaaaaaaaaaa") - f.flush() - f.close + bla = b"blablablaadssaaaaaaaaaaaaaaa" + for wal in self.get_instance_wal_list(self.backup_dir, 'node'): + self.corrupt_instance_wal(self.backup_dir, 'node', wal, 128, bla) # Validate to xid - try: - self.validate_pb( - backup_dir, - 'node', - backup_id, - options=[ - "--xid={0}".format(target_xid), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segments corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: Backup' in e.message and - 'WAL segments are corrupted' in e.message and - "WARNING: There are not enough WAL " - "records to consistenly restore backup" in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', backup_id, + options=[f"--xid={target_xid}", "-j", "4"], + expect_error="because of wal segments corruption") + self.assertMessage(contains='WARNING: Backup') + self.assertMessage(contains='WAL segments are corrupted') + self.assertMessage(contains="WARNING: There are not enough WAL " + "records to consistenly restore backup") self.assertEqual( 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "CORRUPT"') # @unittest.skip("skip") @@ -1592,71 +1159,39 @@ def test_validate_wal_lost_segment_1(self): run validate, expecting error because of missing wal segment make sure that backup status is 'CORRUPT' """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() node.pgbench_init(scale=3) - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # Delete wal segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')] - wals.sort() - file = os.path.join(backup_dir, 'wal', 'node', wals[-1]) - os.remove(file) - - # cut out '.gz' - if self.archive_compress: - file = file[:-3] - - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - "is absent" in e.message and - "WARNING: There are not enough WAL records to consistenly " - "restore backup {0}".format(backup_id) in e.message and - "WARNING: Backup {0} WAL segments are corrupted".format( - backup_id) in e.message and - "WARNING: Some backups are not valid" in e.message, - "\n 
Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + wals = self.get_instance_wal_list(self.backup_dir, 'node') + self.remove_instance_wal(self.backup_dir, 'node', max(wals)) + + self.pb.validate('node', options=["-j", "4"], + expect_error="because of wal segment disappearance") + self.assertMessage(contains="is absent") + self.assertMessage(contains="WARNING: There are not enough WAL records to consistenly " + f"restore backup {backup_id}") + self.assertMessage(contains=f"WARNING: Backup {backup_id} WAL segments are corrupted") self.assertEqual( 'CORRUPT', - self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup {0} should have STATUS "CORRUPT"') # Run validate again - try: - self.validate_pb(backup_dir, 'node', backup_id, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'INFO: Revalidating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'ERROR: Backup {0} is corrupt.'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', backup_id, options=["-j", "4"], + expect_error="because of backup corruption") + self.assertMessage(contains=f'INFO: Revalidating backup {backup_id}') + self.assertMessage(contains=f'ERROR: Backup {backup_id} is corrupt.') # @unittest.skip("skip") def test_validate_corrupt_wal_between_backups(self): @@ -1664,17 +1199,15 @@ def test_validate_corrupt_wal_between_backups(self): make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node') + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) # make some wals node.pgbench_init(scale=3) @@ -1689,62 +1222,42 @@ def test_validate_corrupt_wal_between_backups(self): con.commit() target_xid = res[0][0] - if self.get_version(node) < self.version_to_num('10.0'): - walfile = node.safe_psql( - 'postgres', - 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() - else: - walfile = node.safe_psql( - 'postgres', - 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + walfile = node.safe_psql( + 'postgres', + 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() - if self.archive_compress: - walfile = walfile + '.gz' + walfile = walfile + self.compress_suffix self.switch_wal_segment(node) # generate some wals node.pgbench_init(scale=3) - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) # Corrupt WAL - wals_dir = os.path.join(backup_dir, 'wal', 'node') - with open(os.path.join(wals_dir, walfile), "rb+", 0) as f: - f.seek(9000) - f.write(b"b") - f.flush() - f.close + self.corrupt_instance_wal(self.backup_dir, 'node', walfile, 9000, b"b") # 
Validate to xid - try: - self.validate_pb( - backup_dir, - 'node', - backup_id, - options=[ - "--xid={0}".format(target_xid), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segments corruption.\n" - " Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Not enough WAL records to xid' in e.message and - 'WARNING: Recovery can be done up to time' in e.message and - "ERROR: Not enough WAL records to xid {0}\n".format( - target_xid), - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', backup_id, + options=[f"--xid={target_xid}", "-j", "4"], + expect_error="because of wal segments corruption") + self.assertMessage(contains='ERROR: Not enough WAL records to xid') + self.assertMessage(contains='WARNING: Recovery can be done up to time') + self.assertMessage(contains=f"ERROR: Not enough WAL records to xid {target_xid}") + + # Validate whole WAL Archive. It shouldn't be error, only warning in LOG. [PBCKP-55] + self.pb.validate('node', + options=[f"--wal", "-j", "4"], expect_error=True) + self.assertMessage(contains='ERROR: WAL archive check error') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[0]['status'], + self.pb.show('node')[0]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[1]['status'], + self.pb.show('node')[1]['status'], 'Backup STATUS should be "OK"') # @unittest.skip("skip") @@ -1753,36 +1266,24 @@ def test_pgpro702_688(self): make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - backup_id = self.backup_node( - backup_dir, 'node', node, options=["--stream"]) - recovery_time = self.show_pb( - backup_dir, 'node', backup_id=backup_id)['recovery-time'] - - try: - self.validate_pb( - backup_dir, 'node', - options=["--time={0}".format(recovery_time), "-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of wal segment disappearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WAL archive is empty. You cannot restore backup to a ' - 'recovery target without WAL archive', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + backup_id = self.pb.backup_node( + 'node', node, options=["--stream"]) + recovery_time = self.pb.show( + 'node', backup_id=backup_id)['recovery-time'] + + self.pb.validate('node', + options=[f"--time={recovery_time}", "-j", "4"], + expect_error="because of wal segment disappearance") + self.assertMessage(contains='WAL archive is empty. You cannot restore backup to a ' + 'recovery target without WAL archive') # @unittest.skip("skip") def test_pgpro688(self): @@ -1790,23 +1291,21 @@ def test_pgpro688(self): make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. 
RESOLVED """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) - recovery_time = self.show_pb( - backup_dir, 'node', backup_id)['recovery-time'] + backup_id = self.pb.backup_node('node', node) + recovery_time = self.pb.show( + 'node', backup_id)['recovery-time'] - self.validate_pb( - backup_dir, 'node', options=["--time={0}".format(recovery_time), + self.pb.validate( + 'node', options=["--time={0}".format(recovery_time), "-j", "4"]) # @unittest.skip("skip") @@ -1816,22 +1315,17 @@ def test_pgpro561(self): make node with archiving, make stream backup, restore it to node1, check that archiving is not successful on node1 """ - node1 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node1'), - set_replication=True, - initdb_params=['--data-checksums']) + node1 = self.pg_node.make_simple('node1', set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node1', node1) - self.set_archiving(backup_dir, 'node1', node1) + + self.pb.init() + self.pb.add_instance('node1', node1) + self.pb.set_archiving('node1', node1) node1.slow_start() - backup_id = self.backup_node( - backup_dir, 'node1', node1, options=["--stream"]) + backup_id = self.pb.backup_node('node1', node1, options=["--stream"]) - node2 = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node2')) + node2 = self.pg_node.make_simple('node2') node2.cleanup() node1.psql( @@ -1840,18 +1334,16 @@ def test_pgpro561(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - self.backup_node( - backup_dir, 'node1', node1, + self.pb.backup_node( + 'node1', node1, backup_type='page', options=["--stream"]) - self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir) + self.pb.restore_node('node1', node=node2) - self.set_auto_conf( - node2, {'port': node2.port, 'archive_mode': 'off'}) + node2.set_auto_conf({'port': node2.port, 'archive_mode': 'off'}) node2.slow_start() - self.set_auto_conf( - node2, {'archive_mode': 'on'}) + node2.set_auto_conf({'archive_mode': 'on'}) node2.stop() node2.slow_start() @@ -1885,7 +1377,7 @@ def test_pgpro561(self): self.switch_wal_segment(node1) -# wals_dir = os.path.join(backup_dir, 'wal', 'node1') +# wals_dir = os.path.join(self.backup_dir, 'wal', 'node1') # wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( # wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] # wals = map(str, wals) @@ -1893,7 +1385,7 @@ def test_pgpro561(self): self.switch_wal_segment(node2) -# wals_dir = os.path.join(backup_dir, 'wal', 'node1') +# wals_dir = os.path.join(self.backup_dir, 'wal', 'node1') # wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join( # wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')] # wals = map(str, wals) @@ -1923,112 +1415,80 @@ def test_validate_corrupted_full(self): remove corruption and run valudate 
again, check that second full backup and his page backups are OK """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ + node = self.pg_node.make_simple('node', + set_replication=True, + pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') - backup_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + backup_id = self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') node.safe_psql( "postgres", "alter system set archive_command = 'false'") node.reload() - try: - self.backup_node( - backup_dir, 'node', node, - backup_type='page', options=['--archive-timeout=1s']) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - pass + self.pb.backup_node('node', node, backup_type='page', + options=['--archive-timeout=1s'], + expect_error="because of data file dissapearance") self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') - self.set_archiving(backup_dir, 'node', node) + self.pb.show('node')[6]['status'] == 'ERROR') + self.pb.set_archiving('node', node) node.reload() - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - file_new = os.path.join(backup_dir, 'postgresql.auto.conf') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Validating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.pb.backup_node('node', node, backup_type='page') + + auto_conf = self.read_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf') + self.remove_backup_file(self.backup_dir, 
'node', backup_id, + 'database/postgresql.auto.conf') + + self.pb.validate(options=["-j", "4"], + expect_error="because of data file dissapearance") + self.assertMessage(contains=f'Validating backup {backup_id}') + self.assertMessage(contains=f'WARNING: Backup {backup_id} data files are corrupted') + self.assertMessage(contains='WARNING: Some backups are not valid') + + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') self.assertTrue( - self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') + self.pb.show('node')[3]['status'] == 'CORRUPT') self.assertTrue( - self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.pb.show('node')[4]['status'] == 'ORPHAN') self.assertTrue( - self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.pb.show('node')[5]['status'] == 'ORPHAN') self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') + self.pb.show('node')[6]['status'] == 'ERROR') self.assertTrue( - self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') + self.pb.show('node')[7]['status'] == 'ORPHAN') - os.rename(file_new, file) - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.write_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf', auto_conf) + + self.pb.validate(options=["-j", "4"], + expect_error=True) + self.assertMessage(contains='WARNING: Some backups are not valid') + + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') + self.pb.show('node')[6]['status'] == 'ERROR') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') # @unittest.skip("skip") def test_validate_corrupted_full_1(self): @@ -2043,90 +1503,61 @@ def test_validate_corrupted_full_1(self): second page should be CORRUPT third page should be ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - 
self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - backup_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - backup_id_page = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - file_new = os.path.join(backup_dir, 'postgresql.auto.conf') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Validating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - os.rename(file_new, file) - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id_page, 'database', 'backup_label') - - file_new = os.path.join(backup_dir, 'backup_label') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + + backup_id = self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + backup_id_page = self.pb.backup_node( + 'node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + + auto_conf = self.read_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf') + self.remove_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf') + + 
self.pb.validate(options=["-j", "4"], + expect_error="because of data file dissapearance") + self.assertMessage(contains=f'Validating backup {backup_id}') + self.assertMessage(contains=f'WARNING: Backup {backup_id} data files are corrupted') + self.assertMessage(contains='WARNING: Some backups are not valid') + + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[3]['status'] == 'CORRUPT') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') + + self.write_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf', auto_conf) + + self.remove_backup_file(self.backup_dir, 'node', backup_id_page, + 'database/backup_label') + + self.pb.validate(options=["-j", "4"], + expect_error=True) + self.assertMessage(contains='WARNING: Some backups are not valid') + + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'CORRUPT') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') # @unittest.skip("skip") def test_validate_corrupted_full_2(self): @@ -2150,341 +1581,124 @@ def test_validate_corrupted_full_2(self): remove corruption from PAGE2_2 and run validate on PAGE2_4 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - corrupt_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - corrupt_id, 'database', 'backup_label') - - file_new = os.path.join(backup_dir, 'backup_label') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'INFO: Validating parents for backup {0}'.format(validate_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[2]['id']), e.message, - '\n Unexpected Error 
Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[3]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'INFO: Validating backup {0}'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + + backup_id_3 = self.pb.backup_node('node', node) + backup_id_4 = self.pb.backup_node('node', node, backup_type='page') + corrupt_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_6 = self.pb.backup_node('node', node, backup_type='page') + validate_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_8 = self.pb.backup_node('node', node, backup_type='page') + + backup_label = self.read_backup_file(self.backup_dir, 'node', corrupt_id, + 'database/backup_label') + self.remove_backup_file(self.backup_dir, 'node', corrupt_id, + 'database/backup_label') + + self.pb.validate('node', validate_id, options=["-j", "4"], + expect_error="because of data file dissapearance") + self.assertMessage(contains=f'INFO: Validating parents for backup {validate_id}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_3}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_4}') + self.assertMessage(contains=f'INFO: Validating backup {corrupt_id}') + self.assertMessage(contains=f'WARNING: Backup {corrupt_id} data files are corrupted') + + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'CORRUPT') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # THIS IS GOLD!!!! 
- self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Backup {0} data files are valid'.format( - self.show_pb(backup_dir, 'node')[9]['id']), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} data files are valid'.format( - self.show_pb(backup_dir, 'node')[8]['id']), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Some backups are not valid', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + backup_id_9 = self.pb.backup_node('node', node, backup_type='page') + backup_id_a = self.pb.backup_node('node', node, backup_type='page') + + self.pb.validate('node', options=["-j", "4"], + expect_error="because of data file dissapearance") + self.assertMessage(contains=f'Backup {backup_id_a} data files are valid') + self.assertMessage(contains=f'Backup {backup_id_9} data files are valid') + self.assertMessage(regex=f'WARNING: Backup {backup_id_8} .* parent {corrupt_id} .* CORRUPT') + self.assertMessage(regex=f'WARNING: Backup {validate_id} .* parent {corrupt_id} .* CORRUPT') + self.assertMessage(regex=f'WARNING: Backup {backup_id_6} .* parent {corrupt_id} .* CORRUPT') + self.assertMessage(contains=f'INFO: Revalidating backup {corrupt_id}') + self.assertMessage(contains='WARNING: Some backups are not valid') + + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') 
+ self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'CORRUPT') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # revalidate again - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has status: ORPHAN'.format(validate_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Validating parents for backup {0}'.format( - validate_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[2]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[3]['id']), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - corrupt_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'ERROR: Backup {0} is orphan.'.format( - validate_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', validate_id, options=["-j", "4"], + expect_error="because of data file dissapearance") + self.assertMessage(contains=f'WARNING: Backup {validate_id} has status: ORPHAN') + self.assertMessage(contains=f'Backup {backup_id_8} has parent {corrupt_id} with status: CORRUPT') + self.assertMessage(contains=f'Backup {validate_id} has parent {corrupt_id} with status: CORRUPT') + self.assertMessage(contains=f'Backup {backup_id_6} has parent {corrupt_id} with status: CORRUPT') + self.assertMessage(contains=f'INFO: Validating parents for backup {validate_id}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_3}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_4}') + 
self.assertMessage(contains=f'INFO: Revalidating backup {corrupt_id}') + self.assertMessage(contains=f'WARNING: Backup {corrupt_id} data files are corrupted') + self.assertMessage(contains=f'ERROR: Backup {validate_id} is orphan.') # Fix CORRUPT - os.rename(file_new, file) - - output = self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - - self.assertIn( - 'WARNING: Backup {0} has status: ORPHAN'.format(validate_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[7]['id'], corrupt_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[6]['id'], corrupt_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], corrupt_id), - output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validating parents for backup {0}'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[2]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validating backup {0}'.format( - self.show_pb(backup_dir, 'node')[3]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - corrupt_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} data files are valid'.format( - corrupt_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[5]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} data files are valid'.format( - self.show_pb(backup_dir, 'node')[5]['id']), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Revalidating backup {0}'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Backup {0} data files are valid'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Backup {0} WAL segments are valid'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Backup {0} is valid.'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'INFO: Validate of backup {0} completed.'.format( - validate_id), output, - '\n Unexpected Output Message: {0}\n'.format( - repr(output))) + self.write_backup_file(self.backup_dir, 'node', corrupt_id, + 'database/backup_label', backup_label) + + self.pb.validate('node', validate_id, options=["-j", "4"]) + + self.assertMessage(contains=f'WARNING: Backup {validate_id} has status: ORPHAN') + self.assertMessage(contains=f'Backup {backup_id_8} has parent {corrupt_id} with status: CORRUPT') + self.assertMessage(contains=f'Backup {validate_id} has parent {corrupt_id} with status: CORRUPT') + 
self.assertMessage(contains=f'Backup {backup_id_6} has parent {corrupt_id} with status: CORRUPT') + self.assertMessage(contains=f'INFO: Validating parents for backup {validate_id}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_3}') + self.assertMessage(contains=f'INFO: Validating backup {backup_id_4}') + self.assertMessage(contains=f'INFO: Revalidating backup {corrupt_id}') + self.assertMessage(contains=f'Backup {corrupt_id} data files are valid') + self.assertMessage(contains=f'INFO: Revalidating backup {backup_id_6}') + self.assertMessage(contains=f'Backup {backup_id_6} data files are valid') + self.assertMessage(contains=f'INFO: Revalidating backup {validate_id}') + self.assertMessage(contains=f'Backup {validate_id} data files are valid') + self.assertMessage(contains=f'INFO: Backup {validate_id} WAL segments are valid') + self.assertMessage(contains=f'INFO: Backup {validate_id} is valid.') + self.assertMessage(contains=f'INFO: Validate of backup {validate_id} completed.') # Now we have two perfectly valid backup chains based on FULL2 - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # @unittest.skip("skip") def test_validate_corrupted_full_missing(self): @@ -2498,235 +1712,131 @@ def test_validate_corrupted_full_missing(self): second full backup and his firts page backups are OK, third page should be ORPHAN """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - backup_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, 
backup_type='page') - backup_id_page = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - file = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'postgresql.auto.conf') - - file_new = os.path.join(backup_dir, 'postgresql.auto.conf') - os.rename(file, file_new) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of data file dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'Validating backup {0}'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} data files are corrupted'.format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} has status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + + backup_id = self.pb.backup_node('node', node) + backup_id_6 = self.pb.backup_node('node', node, backup_type='page') + backup_id_page = self.pb.backup_node('node', node, backup_type='page') + backup_id_8 = self.pb.backup_node('node', node, backup_type='page') + backup_id_9 = self.pb.backup_node('node', node, backup_type='page') + + auto_conf = self.read_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf') + self.remove_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf') + + self.pb.validate(options=["-j", "4"], + expect_error="because of data file dissapearance") + self.assertMessage(contains=f'Validating backup {backup_id}') + self.assertMessage(contains=f'WARNING: Backup {backup_id} data files are corrupted') + self.assertMessage(contains=f'WARNING: Backup {backup_id_6} is orphaned because his parent {backup_id} has status: CORRUPT') + + self.assertTrue(self.pb.show('node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'CORRUPT') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + 
self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # Full backup is fixed - os.rename(file_new, file) + self.write_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.auto.conf', auto_conf) # break PAGE - old_directory = os.path.join( - backup_dir, 'backups', 'node', backup_id_page) - new_directory = os.path.join(backup_dir, backup_id_page) - os.rename(old_directory, new_directory) - - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - backup_id_page), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], - backup_id_page), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: CORRUPT'.format( - self.show_pb(backup_dir, 'node')[5]['id'], backup_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.change_backup_status(self.backup_dir, 'node', backup_id_page, + 'THIS_BACKUP_IS_HIDDEN_FOR_TESTS') + + self.pb.validate(options=["-j", "4"], + expect_error="because backup in chain is removed") + self.assertMessage(contains='WARNING: Some backups are not valid') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_9} (.*(missing|parent {backup_id_page})){{2}}') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_8} (.*(missing|parent {backup_id_page})){{2}}') + self.assertMessage(contains=f'INFO: Backup {backup_id_6} WAL segments are valid') + + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') # missing backup is here - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # validate should be idempotent - user running validate # second time must be provided with ID of missing backup - try: - self.validate_pb(backup_dir, options=["-j", "4"]) - except ProbackupException as e: - self.assertIn( - 'WARNING: Some backups are not valid', e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertIn( - 'WARNING: Backup {0} has missing parent 
{1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - backup_id_page), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate(options=["-j", "4"], + expect_error=True) + self.assertMessage(contains='WARNING: Some backups are not valid') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_9} (.*(missing|parent {backup_id_page})){{2}}') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_8} (.*(missing|parent {backup_id_page})){{2}}') - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], - backup_id_page), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') # missing backup is here - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # fix missing PAGE backup - os.rename(new_directory, old_directory) + self.change_backup_status(self.backup_dir, 'node', backup_id_page, 'ORPHAN') # exit(1) - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - output = self.validate_pb(backup_dir, options=["-j", "4"]) - - self.assertIn( - 'INFO: All backups are valid', - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: ORPHAN'.format( - self.show_pb(backup_dir, 'node')[8]['id'], - self.show_pb(backup_dir, 'node')[6]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'WARNING: Backup {0} has parent {1} with status: ORPHAN'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - self.show_pb(backup_dir, 'node')[6]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[6]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) 
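The regex form of assertMessage used in the new hunks above is worth a quick illustration: the doubled group in `WARNING: Backup <id> (.*(missing|parent <parent>)){2}` accepts the warning whether it is phrased as "has missing parent <parent>" or as "is orphaned because his parent <parent> is missing". A minimal sketch with Python's standard re module, using made-up backup IDs (the test framework's assertMessage is assumed to apply such a pattern to the command output with a plain regex search):

import re

# Placeholder IDs; in the tests these come from self.pb.backup_node(...).
backup_id, parent_id = 'QWERTY', 'ASDFGH'

# Same shape as the patterns passed to assertMessage(regex=...) above.
pattern = fr'WARNING: Backup {backup_id} (.*(missing|parent {parent_id})){{2}}'

for line in (f'WARNING: Backup {backup_id} has missing parent {parent_id}',
             f'WARNING: Backup {backup_id} is orphaned because his parent '
             f'{parent_id} is missing'):
    # Both phrasings contain "missing" and "parent <id>" somewhere, in either
    # order, so the {2}-repeated group matches each of them.
    assert re.search(pattern, line)
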
- - self.assertIn( - 'Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[7]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertIn( - 'Revalidating backup {0}'.format( - self.show_pb(backup_dir, 'node')[8]['id']), - output, - '\n Unexpected Error Message: {0}\n'.format( - repr(output))) - - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') + + self.pb.validate(options=["-j", "4"]) + + self.assertMessage(contains='INFO: All backups are valid') + self.assertMessage(contains=f'Revalidating backup {backup_id_page}') + self.assertMessage(contains=f'Revalidating backup {backup_id_8}') + self.assertMessage(contains=f'Revalidating backup {backup_id_9}') + + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') def test_file_size_corruption_no_validate(self): - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - # initdb_params=['--data-checksums'], - ) + node = self.pg_node.make_simple('node', checksum=False) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() @@ -2746,32 +1856,23 @@ def test_file_size_corruption_no_validate(self): "postgres", "select pg_relation_size('t_heap')") - backup_id = self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=["-j", "4"], asynchronous=False, gdb=False) + backup_id = self.pb.backup_node( + 'node', node, backup_type="full", + options=["-j", "4"], gdb=False) node.stop() node.cleanup() # Let`s do file corruption - with open( - os.path.join( - backup_dir, "backups", 'node', backup_id, - "database", heap_path), "rb+", 0) as f: - f.truncate(int(heap_size) - 
4096) - f.flush() - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + os.path.join("database", heap_path), + truncate=(int(heap_size) - 4096)) node.cleanup() - try: - self.restore_node( - backup_dir, 'node', node, - options=["--no-validate"]) - except ProbackupException as e: - self.assertTrue( - "ERROR: Backup files restoring failed" in e.message, - repr(e.message)) + self.pb.restore_node('node', node=node, options=["--no-validate"], + expect_error=True) + self.assertMessage(contains="ERROR: Backup files restoring failed") # @unittest.skip("skip") def test_validate_specific_backup_with_missing_backup(self): @@ -2789,128 +1890,86 @@ def test_validate_specific_backup_with_missing_backup(self): PAGE1_1 FULL1 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # CHAIN2 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + missing_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_6 = self.pb.backup_node('node', node, backup_type='page') + validate_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_8 = self.pb.backup_node('node', node, backup_type='page') # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - old_directory = os.path.join(backup_dir, 'backups', 'node', missing_id) - new_directory = os.path.join(backup_dir, missing_id) - - os.rename(old_directory, new_directory) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[7]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 
'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + + self.change_backup_status(self.backup_dir, 'node', missing_id, + "THIS_BACKUP_IS_HIDDEN_FOR_TESTS") + + self.pb.validate('node', validate_id, options=["-j", "4"], + expect_error="because of backup dissapearance") + self.assertMessage(contains=f'WARNING: Backup {backup_id_8} is orphaned ' + f'because his parent {missing_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {validate_id} is orphaned ' + f'because his parent {missing_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_6} is orphaned ' + f'because his parent {missing_id} is missing') + + self.assertTrue(self.pb.show('node')[10]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') # missing backup - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - os.rename(new_directory, old_directory) + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') + + self.pb.validate('node', validate_id, options=["-j", "4"], + 
expect_error="because of backup dissapearance") + self.assertMessage(contains=f'WARNING: Backup {backup_id_8} has missing ' + f'parent {missing_id}') + self.assertMessage(contains=f'WARNING: Backup {validate_id} has missing ' + f'parent {missing_id}') + self.assertMessage(contains=f'WARNING: Backup {backup_id_6} has missing ' + f'parent {missing_id}') + + self.change_backup_status(self.backup_dir, 'node', missing_id, "OK") # Revalidate backup chain - self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) - - self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.pb.validate('node', validate_id, options=["-j", "4"]) + + self.assertTrue(self.pb.show('node')[11]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[10]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # @unittest.skip("skip") def test_validate_specific_backup_with_missing_backup_1(self): @@ -2928,106 +1987,80 @@ def test_validate_specific_backup_with_missing_backup_1(self): PAGE1_1 FULL1 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # CHAIN2 - missing_full_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, 
backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_full_id = self.pb.backup_node('node', node) + backup_id_5 = self.pb.backup_node('node', node, backup_type='page') + missing_page_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_7 = self.pb.backup_node('node', node, backup_type='page') + validate_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_9 = self.pb.backup_node('node', node, backup_type='page') # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - page_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_page_id) - page_new_directory = os.path.join(backup_dir, missing_page_id) - os.rename(page_old_directory, page_new_directory) - - full_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_full_id) - full_new_directory = os.path.join(backup_dir, missing_full_id) - os.rename(full_old_directory, full_new_directory) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + + self.change_backup_status(self.backup_dir, 'node', missing_page_id, + "THIS_BACKUP_IS_HIDDEN_FOR_TESTS") + self.change_backup_status(self.backup_dir, 'node', missing_full_id, + "THIS_BACKUP_IS_HIDDEN_FOR_TESTS") + + self.pb.validate('node', validate_id, options=["-j", "4"], + expect_error="because of backup dissapearance") + self.assertMessage(contains=f'WARNING: Backup {backup_id_9} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {validate_id} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_7} is orphaned ' + f'because his parent {missing_page_id} is missing') + + 
self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'ORPHAN') # PAGE2_1 - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') # <- SHit + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') # <- SHit # FULL2 - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') - os.rename(page_new_directory, page_old_directory) - os.rename(full_new_directory, full_old_directory) + self.change_backup_status(self.backup_dir, 'node', missing_page_id, "OK") + self.change_backup_status(self.backup_dir, 'node', missing_full_id, "OK") # Revalidate backup chain - self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) - - self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') # <- Fail - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.pb.validate('node', validate_id, options=["-j", "4"]) + + self.assertTrue(self.pb.show('node')[11]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[10]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'ORPHAN') # <- Fail + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # @unittest.skip("skip") def test_validate_with_missing_backup_1(self): @@ -3045,174 +2078,112 @@ def test_validate_with_missing_backup_1(self): PAGE1_1 FULL1 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - 
self.set_archiving(backup_dir, 'node', node) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # CHAIN2 - missing_full_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - validate_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_full_id = self.pb.backup_node('node', node) + backup_id_5 = self.pb.backup_node('node', node, backup_type='page') + missing_page_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_7 = self.pb.backup_node('node', node, backup_type='page') + validate_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_9 = self.pb.backup_node('node', node, backup_type='page') # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # Break PAGE - page_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_page_id) - page_new_directory = os.path.join(backup_dir, missing_page_id) - os.rename(page_old_directory, page_new_directory) + self.change_backup_status(self.backup_dir, 'node', missing_page_id, + 'THIS_BACKUP_IS_HIDDEN_FOR_TESTS') # Break FULL - full_old_directory = os.path.join( - backup_dir, 'backups', 'node', missing_full_id) - full_new_directory = os.path.join(backup_dir, missing_full_id) - os.rename(full_old_directory, full_new_directory) - - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - 
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.change_backup_status(self.backup_dir, 'node', missing_full_id, + 'THIS_BACKUP_IS_HIDDEN_FOR_TESTS') + + self.pb.validate('node', validate_id, options=["-j", "4"], + expect_error="because of backup dissapearance") + self.assertMessage(contains=f'WARNING: Backup {backup_id_9} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {validate_id} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_7} is orphaned ' + f'because his parent {missing_page_id} is missing') + + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'ORPHAN') # PAGE2_2 is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') - os.rename(page_new_directory, page_old_directory) + self.change_backup_status(self.backup_dir, 'node', missing_page_id, 'OK') # Revalidate backup chain - try: - self.validate_pb(backup_dir, 'node', validate_id, - options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has status: ORPHAN'.format( - validate_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[5]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[3]['id'], - missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - 
self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') + self.pb.validate('node', validate_id, options=["-j", "4"], + expect_error="because of backup dissapearance") + self.assertMessage(contains=f'WARNING: Backup {validate_id} has status: ORPHAN') + self.assertMessage(contains=f'WARNING: Backup {backup_id_9} has missing ' + f'parent {missing_full_id}') + self.assertMessage(contains=f'WARNING: Backup {validate_id} has missing ' + f'parent {missing_full_id}') + self.assertMessage(contains=f'WARNING: Backup {backup_id_7} has missing ' + f'parent {missing_full_id}') + self.assertMessage(contains=f'WARNING: Backup {missing_page_id} is orphaned ' + f'because his parent {missing_full_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_5} is orphaned ' + f'because his parent {missing_full_id} is missing') + + self.assertTrue(self.pb.show('node')[10]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[3]['status'] == 'ORPHAN') # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') - os.rename(full_new_directory, full_old_directory) + self.change_backup_status(self.backup_dir, 'node', missing_full_id, 'OK') # Revalidate chain - self.validate_pb(backup_dir, 'node', validate_id, options=["-j", "4"]) - - self.assertTrue(self.show_pb(backup_dir, 'node')[11]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.pb.validate('node', validate_id, options=["-j", "4"]) + + self.assertTrue(self.pb.show('node')[11]['status'] == 'OK') + 
self.assertTrue(self.pb.show('node')[10]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[5]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[4]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[3]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # @unittest.skip("skip") def test_validate_with_missing_backup_2(self): @@ -3230,185 +2201,123 @@ def test_validate_with_missing_backup_2(self): PAGE1_1 FULL1 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) + - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # CHAIN1 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') # CHAIN2 - missing_full_id = self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - missing_page_id = self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node( - backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') + missing_full_id = self.pb.backup_node('node', node) + backup_id_5 = self.pb.backup_node('node', node, backup_type='page') + missing_page_id = self.pb.backup_node( + 'node', node, backup_type='page') + backup_id_7 = self.pb.backup_node('node', node, backup_type='page') + backup_id_8 = self.pb.backup_node('node', node, backup_type='page') + backup_id_9 = self.pb.backup_node('node', node, backup_type='page') # CHAIN3 - self.backup_node(backup_dir, 'node', node) - self.backup_node(backup_dir, 'node', node, backup_type='page') - self.backup_node(backup_dir, 'node', node, backup_type='page') - - page_old_directory = os.path.join(backup_dir, 'backups', 'node', missing_page_id) - page_new_directory = os.path.join(backup_dir, missing_page_id) - os.rename(page_old_directory, page_new_directory) - - full_old_directory = os.path.join(backup_dir, 'backups', 'node', missing_full_id) - full_new_directory = os.path.join(backup_dir, missing_full_id) - os.rename(full_old_directory, full_new_directory) - - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_page_id), - e.message, - '\n 
Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_page_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[3]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.pb.backup_node('node', node) + self.pb.backup_node('node', node, backup_type='page') + self.pb.backup_node('node', node, backup_type='page') + + self.change_backup_status(self.backup_dir, 'node', missing_page_id, + 'THIS_BACKUP_IS_HIDDEN_FOR_TESTS') + self.change_backup_status(self.backup_dir, 'node', missing_full_id, + 'THIS_BACKUP_IS_HIDDEN_FOR_TESTS') + + self.pb.validate('node', options=["-j", "4"], + expect_error="because of backup dissapearance") + self.assertMessage(contains=f'WARNING: Backup {backup_id_9} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_8} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_7} is orphaned ' + f'because his parent {missing_page_id} is missing') + self.assertMessage(contains=f'WARNING: Backup {backup_id_5} is orphaned ' + f'because his parent {missing_full_id} is missing') + + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'ORPHAN') # PAGE2_2 is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[3]['status'] == 'ORPHAN') # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') - os.rename(page_new_directory, page_old_directory) + self.change_backup_status(self.backup_dir, 'node', missing_page_id, 'OK') # Revalidate backup chain - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of backup 
dissapearance.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[7]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[6]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[5]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} is orphaned because his parent {1} is missing'.format( - self.show_pb(backup_dir, 'node')[4]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn( - 'WARNING: Backup {0} has missing parent {1}'.format( - self.show_pb(backup_dir, 'node')[3]['id'], missing_full_id), - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - self.assertTrue(self.show_pb(backup_dir, 'node')[10]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[9]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[8]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'ORPHAN') + self.pb.validate('node', options=["-j", "4"], + expect_error="because of backup dissapearance") + self.assertMessage(regex=fr'WARNING: Backup {backup_id_9} (.*(missing|parent {missing_full_id})){{2}}') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_8} (.*(missing|parent {missing_full_id})){{2}}') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_7} (.*(missing|parent {missing_full_id})){{2}}') + self.assertMessage(regex=fr'WARNING: Backup {missing_page_id} (.*(missing|parent {missing_full_id})){{2}}') + self.assertMessage(regex=fr'WARNING: Backup {backup_id_5} (.*(missing|parent {missing_full_id})){{2}}') + + self.assertTrue(self.pb.show('node')[10]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[9]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[8]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[7]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[6]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.pb.show('node')[3]['status'] == 'ORPHAN') # FULL1 - is missing - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[2]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[1]['status'] == 'OK') + self.assertTrue(self.pb.show('node')[0]['status'] == 'OK') # @unittest.skip("skip") def test_corrupt_pg_control_via_resetxlog(self): 
""" PGPRO-2096 """ - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + if not self.backup_dir.is_file_based: + self.skipTest('tests uses pg_resetxlog on backup') + + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) - if self.get_version(node) < 100000: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' os.mkdir( os.path.join( - backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status')) + self.backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status')) pg_control_path = os.path.join( - backup_dir, 'backups', 'node', + self.backup_dir, 'backups', 'node', backup_id, 'database', 'global', 'pg_control') md5_before = hashlib.md5( open(pg_control_path, 'rb').read()).hexdigest() - self.run_binary( + self.pb.run_binary( [ pg_resetxlog_path, '-D', - os.path.join(backup_dir, 'backups', 'node', backup_id, 'database'), + os.path.join(self.backup_dir, 'backups', 'node', backup_id, 'database'), '-o 42', '-f' ], @@ -3422,54 +2331,37 @@ def test_corrupt_pg_control_via_resetxlog(self): md5_before, md5_after)) # Validate backup - try: - self.validate_pb(backup_dir, 'node', options=["-j", "4"]) - self.assertEqual( - 1, 0, - "Expecting Error because of pg_control change.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'data files are corrupted', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', options=["-j", "4"], + expect_error="because of pg_control change") + self.assertMessage(contains='data files are corrupted') - # @unittest.skip("skip") + @needs_gdb def test_validation_after_backup(self): """""" - self._check_gdb_flag_or_skip_test() - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + node = self.pg_node.make_simple('node', + set_replication=True) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() # FULL backup - gdb = self.backup_node( - backup_dir, 'node', node, gdb=True, options=['--stream']) + gdb = self.pb.backup_node( + 'node', node, gdb=True, options=['--stream']) gdb.set_breakpoint('pgBackupValidate') gdb.run_until_break() - backup_id = self.show_pb(backup_dir, 'node')[0]['id'] + backup_id = self.pb.show('node')[0]['id'] - file = os.path.join( - backup_dir, "backups", "node", backup_id, - "database", "postgresql.conf") - os.remove(file) + self.remove_backup_file(self.backup_dir, 'node', backup_id, + 'database/postgresql.conf') gdb.continue_execution_until_exit() self.assertEqual( 'CORRUPT', - 
self.show_pb(backup_dir, 'node', backup_id)['status'], + self.pb.show('node', backup_id)['status'], 'Backup STATUS should be "ERROR"') # @unittest.expectedFailure @@ -3478,14 +2370,12 @@ def test_validate_corrupt_tablespace_map(self): """ Check that corruption in tablespace_map is detected """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'external_dir') @@ -3495,54 +2385,32 @@ def test_validate_corrupt_tablespace_map(self): 'CREATE TABLE t_heap(a int) TABLESPACE "external_dir"') # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - tablespace_map = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'tablespace_map') + backup_id = self.pb.backup_node( + 'node', node, options=['--stream']) # Corrupt tablespace_map file in FULL backup - with open(tablespace_map, "rb+", 0) as f: - f.seek(84) - f.write(b"blah") - f.flush() - f.close - - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because tablespace_map is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'WARNING: Invalid CRC of backup file', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + 'database/tablespace_map', damage=(84,b"blah")) + + self.pb.validate('node', backup_id=backup_id, + expect_error="because tablespace_map is corrupted") + self.assertMessage(contains='WARNING: Invalid CRC of backup file') - #TODO fix the test - @unittest.expectedFailure - # @unittest.skip("skip") def test_validate_target_lsn(self): """ - Check validation to specific LSN + Check validation to specific LSN from "forked" backup """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() # FULL backup - self.backup_node(backup_dir, 'node', node) + self.pb.backup_node('node', node) node.safe_psql( "postgres", @@ -3550,45 +2418,38 @@ def test_validate_target_lsn(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,10000) i") - node_restored = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) + node_restored = self.pg_node.make_simple('node_restored') node_restored.cleanup() - self.restore_node(backup_dir, 'node', node_restored) + self.pb.restore_node('node', node_restored) - self.set_auto_conf( - node_restored, {'port': node_restored.port}) + node_restored.set_auto_conf({'port': node_restored.port}) node_restored.slow_start() self.switch_wal_segment(node) - 
backup_id = self.backup_node( - backup_dir, 'node', node_restored, + self.pb.backup_node( + 'node', node_restored, data_dir=node_restored.data_dir) - target_lsn = self.show_pb(backup_dir, 'node')[1]['stop-lsn'] + target_lsn = self.pb.show('node')[1]['stop-lsn'] - self.delete_pb(backup_dir, 'node', backup_id) - - self.validate_pb( - backup_dir, 'node', + self.pb.validate( + 'node', options=[ '--recovery-target-timeline=2', '--recovery-target-lsn={0}'.format(target_lsn)]) - @unittest.skip("skip") def test_partial_validate_empty_and_mangled_database_map(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() @@ -3599,65 +2460,35 @@ def test_partial_validate_empty_and_mangled_database_map(self): 'CREATE database db{0}'.format(i)) # FULL backup with database_map - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - pgdata = self.pgdata_content(node.data_dir) + backup_id = self.pb.backup_node( + 'node', node, options=['--stream']) # truncate database_map - path = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'database_map') - with open(path, "w") as f: - f.close() - - try: - self.validate_pb( - backup_dir, 'node', - options=["--db-include=db1"]) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Backup {0} data files are corrupted".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + 'database/database_map', truncate=0) + + self.pb.validate('node', backup_id, + options=["--db-include=db1"], + expect_error="because database_map is empty") + self.assertMessage(contains=f"WARNING: Backup {backup_id} data files are corrupted") # mangle database_map - with open(path, "w") as f: - f.write("42") - f.close() - - try: - self.validate_pb( - backup_dir, 'node', - options=["--db-include=db1"]) - self.assertEqual( - 1, 0, - "Expecting Error because database_map is empty.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "WARNING: Backup {0} data files are corrupted".format( - backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + 'database/database_map', overwrite=b'42') + + self.pb.validate('node', backup_id, + options=["--db-include=db1"], + expect_error="because database_map is mangled") + self.assertMessage(contains=f"WARNING: Backup {backup_id} data files are corrupted") - @unittest.skip("skip") def test_partial_validate_exclude(self): """""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + + 
node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 10, 1): @@ -3666,67 +2497,37 @@ def test_partial_validate_exclude(self): 'CREATE database db{0}'.format(i)) # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) - - try: - self.validate_pb( - backup_dir, 'node', - options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb( - backup_dir, 'node', - options=[ - "--db-exclude=db1", - "--db-exclude=db5", - "--log-level-console=verbose"]) - self.assertEqual( - 1, 0, - "Expecting Error because of missing backup ID.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You must specify parameter (-i, --backup-id) for partial validation", - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - output = self.validate_pb( - backup_dir, 'node', backup_id, + backup_id = self.pb.backup_node('node', node) + + self.pb.validate('node', + options=["--db-include=db1", "--db-exclude=db2"], + expect_error="because of 'db-exclude' and 'db-include'") + self.assertMessage(contains="ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together") + + self.pb.validate('node', + options=[ + "--db-exclude=db1", + "--db-exclude=db5", + "--log-level-console=verbose"], + expect_error="because of missing backup ID") + self.assertMessage(contains="ERROR: You must specify parameter (-i, --backup-id) for partial validation") + + self.pb.validate( + 'node', backup_id, options=[ "--db-exclude=db1", - "--db-exclude=db5", - "--log-level-console=verbose"]) + "--db-exclude=db5"]) - self.assertIn( - "VERBOSE: Skip file validation due to partial restore", output) - - @unittest.skip("skip") def test_partial_validate_include(self): """ """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) + node = self.pg_node.make_simple('node') + + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) node.slow_start() for i in range(1, 10, 1): @@ -3735,62 +2536,37 @@ def test_partial_validate_include(self): 'CREATE database db{0}'.format(i)) # FULL backup - backup_id = self.backup_node(backup_dir, 'node', node) + backup_id = self.pb.backup_node('node', node) - try: - self.validate_pb( - backup_dir, 'node', - options=[ - "--db-include=db1", - "--db-exclude=db2"]) - self.assertEqual( - 1, 0, - "Expecting Error because of 'db-exclude' and 'db-include'.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: You cannot specify '--db-include' " - "and '--db-exclude' together", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', + 
options=["--db-include=db1", "--db-exclude=db2"], + expect_error="because of 'db-exclude' and 'db-include'") + self.assertMessage(contains="ERROR: You cannot specify '--db-include' " + "and '--db-exclude' together") - output = self.validate_pb( - backup_dir, 'node', backup_id, + self.pb.validate( + 'node', backup_id, options=[ "--db-include=db1", "--db-include=db5", - "--db-include=postgres", - "--log-level-console=verbose"]) + "--db-include=postgres"]) - self.assertIn( - "VERBOSE: Skip file validation due to partial restore", output) - - output = self.validate_pb( - backup_dir, 'node', backup_id, - options=["--log-level-console=verbose"]) - - self.assertNotIn( - "VERBOSE: Skip file validation due to partial restore", output) + self.pb.validate( + 'node', backup_id, + options=[]) # @unittest.skip("skip") def test_not_validate_diffenent_pg_version(self): """Do not validate backup, if binary is compiled with different PG version""" - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + node = self.pg_node.make_simple('node') - backup_id = self.backup_node(backup_dir, 'node', node) - control_file = os.path.join( - backup_dir, "backups", "node", backup_id, - "backup.control") + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + node.slow_start() + + backup_id = self.pb.backup_node('node', node) pg_version = node.major_version @@ -3799,29 +2575,15 @@ def test_not_validate_diffenent_pg_version(self): fake_new_pg_version = pg_version + 1 - with open(control_file, 'r') as f: - data = f.read(); - - data = data.replace( - "server-version = {0}".format(str(pg_version)), - "server-version = {0}".format(str(fake_new_pg_version))) + with self.modify_backup_control(self.backup_dir, 'node', backup_id) as cf: + cf.data = cf.data.replace( + "server-version = {0}".format(str(pg_version)), + "server-version = {0}".format(str(fake_new_pg_version)) + ) - with open(control_file, 'w') as f: - f.write(data); - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because validation is forbidden if server version of backup " - "is different from the server version of pg_probackup.\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Backup {0} has server version".format(backup_id), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + self.pb.validate(expect_error="because validation is forbidden if server version of backup " + "is different from the server version of pg_probackup.") + self.assertMessage(contains=f"ERROR: Backup {backup_id} has server version") # @unittest.expectedFailure # @unittest.skip("skip") @@ -3829,60 +2591,42 @@ def test_validate_corrupt_page_header_map(self): """ Check that corruption in page_header_map is detected """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + 
self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + ok_1 = self.pb.backup_node('node', node, options=['--stream']) # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) - - ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node( + 'node', node, options=['--stream']) - page_header_map = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'page_header_map') - - # Corrupt tablespace_map file in FULL backup - with open(page_header_map, "rb+", 0) as f: - f.seek(42) - f.write(b"blah") - f.flush() + ok_2 = self.pb.backup_node('node', node, options=['--stream']) - with self.assertRaises(ProbackupException) as cm: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + 'page_header_map', damage=(42, b"blah")) - e = cm.exception - self.assertRegex( - cm.exception.message, - r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate('node', backup_id=backup_id, + expect_error="because page_header_map is corrupted") - self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) + self.assertMessage(regex= + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error') - with self.assertRaises(ProbackupException) as cm: - self.validate_pb(backup_dir) + self.assertMessage(contains=f"Backup {backup_id} is corrupt") - e = cm.exception - self.assertRegex( - e.message, - r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.pb.validate(expect_error="because page_header_map is corrupted") - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + self.assertMessage(regex= + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error') - self.assertIn("WARNING: Some backups are not valid", e.message) + self.assertMessage(contains=f"INFO: Backup {ok_1} data files are valid") + self.assertMessage(contains=f"WARNING: Backup {backup_id} data files are corrupted") + self.assertMessage(contains=f"INFO: Backup {ok_2} data files are valid") + self.assertMessage(contains="WARNING: Some backups are not valid") # @unittest.expectedFailure # @unittest.skip("skip") @@ -3890,58 +2634,34 @@ def test_validate_truncated_page_header_map(self): """ Check that corruption in page_header_map is detected """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + ok_1 = self.pb.backup_node('node', node, options=['--stream']) # FULL backup - backup_id = 
self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node( + 'node', node, options=['--stream']) - ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + ok_2 = self.pb.backup_node('node', node, options=['--stream']) - page_header_map = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'page_header_map') + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + 'page_header_map', truncate=121) - # truncate page_header_map file - with open(page_header_map, "rb+", 0) as f: - f.truncate(121) - f.flush() - f.close - - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} is corrupt'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) - self.assertIn("WARNING: Some backups are not valid", e.message) + self.pb.validate('node', backup_id=backup_id, + expect_error="because page_header_map is corrupted") + self.assertMessage(contains=f'ERROR: Backup {backup_id} is corrupt') + + self.pb.validate(expect_error="because page_header_map is corrupted") + self.assertMessage(contains=f"INFO: Backup {ok_1} data files are valid") + self.assertMessage(contains=f"WARNING: Backup {backup_id} data files are corrupted") + self.assertMessage(contains=f"INFO: Backup {ok_2} data files are valid") + self.assertMessage(contains="WARNING: Some backups are not valid") # @unittest.expectedFailure # @unittest.skip("skip") @@ -3949,55 +2669,34 @@ def test_validate_missing_page_header_map(self): """ Check that corruption in page_header_map is detected """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() - ok_1 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + ok_1 = self.pb.backup_node('node', node, options=['--stream']) # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node( + 'node', node, options=['--stream']) - ok_2 = self.backup_node(backup_dir, 'node', node, options=['--stream']) + ok_2 = self.pb.backup_node('node', node, options=['--stream']) - page_header_map = os.path.join( - backup_dir, 'backups', 'node', backup_id, 'page_header_map') + self.remove_backup_file(self.backup_dir, 'node', backup_id, + 'page_header_map') - # unlink page_header_map file - os.remove(page_header_map) + self.pb.validate('node', backup_id=backup_id, + expect_error="because page_header_map is missing") + 
self.assertMessage(contains=f'ERROR: Backup {backup_id} is corrupt') - try: - self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Backup {0} is corrupt'.format(backup_id), e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - try: - self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) - self.assertIn("WARNING: Some backups are not valid", e.message) + self.pb.validate(expect_error="because page_header_map is missing") + self.assertMessage(contains=f"INFO: Backup {ok_1} data files are valid") + self.assertMessage(contains=f"WARNING: Backup {backup_id} data files are corrupted") + self.assertMessage(contains=f"INFO: Backup {ok_2} data files are valid") + self.assertMessage(contains="WARNING: Some backups are not valid") # @unittest.expectedFailure # @unittest.skip("skip") @@ -4005,14 +2704,12 @@ def test_no_validate_tablespace_map(self): """ Check that --no-validate is propagated to tablespace_map """ - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) + node = self.pg_node.make_simple('node', + set_replication=True) + + self.pb.init() + self.pb.add_instance('node', node) node.slow_start() self.create_tblspace_in_node(node, 'external_dir') @@ -4028,23 +2725,19 @@ def test_no_validate_tablespace_map(self): "select oid from pg_tablespace where spcname = 'external_dir'").decode('utf-8').rstrip() # FULL backup - backup_id = self.backup_node( - backup_dir, 'node', node, options=['--stream']) + backup_id = self.pb.backup_node( + 'node', node, options=['--stream']) pgdata = self.pgdata_content(node.data_dir) - tablespace_map = os.path.join( - backup_dir, 'backups', 'node', - backup_id, 'database', 'tablespace_map') - - # overwrite tablespace_map file - with open(tablespace_map, "w") as f: - f.write("{0} {1}".format(oid, tblspace_new)) - f.close + self.corrupt_backup_file(self.backup_dir, 'node', backup_id, + 'database/tablespace_map', + overwrite="{0} {1}".format(oid, tblspace_new), + text=True) node.cleanup() - self.restore_node(backup_dir, 'node', node, options=['--no-validate']) + self.pb.restore_node('node', node, options=['--no-validate']) pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) @@ -4060,6 +2753,23 @@ def test_no_validate_tablespace_map(self): tblspace_new, "Symlink '{0}' do not points to '{1}'".format(tablespace_link, tblspace_new)) + def test_custom_wal_segsize(self): + """ + Check that we can validate a specific instance or a whole catalog + having a custom wal segment size. 
+ """ + node = self.pg_node.make_simple('node', + initdb_params=['--wal-segsize=64'], + pg_options={'min_wal_size': '128MB'}) + self.pb.init() + self.pb.add_instance('node', node) + node.slow_start() + + self.pb.backup_node('node', node, options=['--stream']) + + self.pb.validate('node') + self.pb.validate() + # validate empty backup list # page from future during validate # page from future during backup