How to use the barman.infofile.BackupInfo class in barman

To help you get started, we’ve selected a few barman examples, based on popular ways BackupInfo is used in public projects.


From tests/testing_helpers.py (2ndquadrant-it/barman):
        begin_time=None,
        begin_wal='000000010000000000000002',
        begin_xlog='0/2000028',
        config_file='/pgdata/location/postgresql.conf',
        end_offset=184,
        end_time=None,
        end_wal='000000010000000000000002',
        end_xlog='0/20000B8',
        error=None,
        hba_file='/pgdata/location/pg_hba.conf',
        ident_file='/pgdata/location/pg_ident.conf',
        mode='default',
        pgdata='/pgdata/location',
        server_name='test_server',
        size=12345,
        status=BackupInfo.DONE,
        included_files=None,
        tablespaces=(
            ('tbs1', 16387, '/fake/location'),
            ('tbs2', 16405, '/another/location'),
        ),
        timeline=1,
        version=90302,
        server=None,
        copy_stats=None):
    """
    Create an 'Ad Hoc' BackupInfo object for testing purposes.

    A BackupInfo object is the barman representation of a physical backup;
    for testing purposes it is necessary to build one while avoiding the
    usage of Mock/MagicMock classes as much as possible.
    """
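
A helper like this lets tests work with a realistic, fully populated BackupInfo instead of a Mock. A minimal sketch of how a test might call it; the helper name and its import path are assumptions, since the opening def line is cut off in the excerpt above:

# Hypothetical usage: build_test_backup_info and its module are
# assumed, as the function name is truncated in the snippet above.
from barman.infofile import BackupInfo
from testing_helpers import build_test_backup_info

backup_info = build_test_backup_info(
    server_name='test_server',
    status=BackupInfo.DONE,
)
assert backup_info.pgdata == '/pgdata/location'
assert backup_info.status == BackupInfo.DONE
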
From barman/cli.py (2ndquadrant-it/barman):
def recover(args):
    """
    Recover a server at a given time, name, LSN or xid
    """
    server = get_server(args)

    # Retrieve the backup (parse_backup_id returns a BackupInfo object)
    backup_id = parse_backup_id(server, args)
    if backup_id.status not in BackupInfo.STATUS_COPY_DONE:
        output.error(
            "Cannot recover from backup '%s' of server '%s': "
            "backup status is not DONE",
            args.backup_id, server.config.name)
        output.close_and_exit()

    # decode the tablespace relocation rules
    tablespaces = {}
    if args.tablespace:
        for rule in args.tablespace:
            try:
                tablespaces.update([rule.split(':', 1)])
            except ValueError:
                output.error(
                    "Invalid tablespace relocation rule '%s'\n"
                    "HINT: The valid syntax for a relocation rule is "
From barman/infofile.py (2ndquadrant-it/barman):
def __init__(self, backup_id, **kwargs):
        """
        Stores meta information about a single backup

        :param str,None backup_id:
        """
        self.backup_version = 2
        self.backup_id = backup_id
        super(BackupInfo, self).__init__(**kwargs)
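
Construction is therefore cheap: the id is stored, backup_version is fixed at 2, and any remaining keyword arguments are forwarded to the parent class. A minimal sketch, mirroring the cloud.py example further down:

import datetime

from barman.infofile import BackupInfo

# Only backup_id is mandatory here; server_name is one of the
# optional keyword arguments seen in the cloud.py snippet below.
backup_info = BackupInfo(
    backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'),
    server_name='cloud',
)
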
From barman/backup.py (2ndquadrant-it/barman):
def cron_retention_policy(self):
        """
        Retention policy management
        """
        enforce_retention_policies = self.server.enforce_retention_policies
        retention_policy_mode = self.config.retention_policy_mode
        if (enforce_retention_policies and retention_policy_mode == 'auto'):
            available_backups = self.get_available_backups(
                BackupInfo.STATUS_ALL)
            retention_status = self.config.retention_policy.report()
            for bid in sorted(retention_status.keys()):
                if retention_status[bid] == BackupInfo.OBSOLETE:
                    output.info(
                        "Enforcing retention policy: removing backup %s for "
                        "server %s" % (bid, self.config.name))
                    self.delete_backup(available_backups[bid])
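
The policy report is a plain mapping from backup id to a retention state, so enforcement reduces to a filtered loop. A schematic rendering with made-up ids; only OBSOLETE entries trigger deletion:

from barman.infofile import BackupInfo

# Illustrative report: keys are backup ids, values are the retention
# constants used above.
retention_status = {
    '20240201T000000': BackupInfo.POTENTIALLY_OBSOLETE,  # kept
    '20240101T000000': BackupInfo.OBSOLETE,              # deleted
}
to_delete = [bid for bid in sorted(retention_status)
             if retention_status[bid] == BackupInfo.OBSOLETE]
# to_delete == ['20240101T000000']
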
From barman/cloud.py (2ndquadrant-it/barman):
def backup(self):
        """
        Upload a backup to S3
        """
        server_name = 'cloud'
        backup_info = BackupInfo(
            backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'),
            server_name=server_name,
        )
        backup_info.set_attribute("systemid", self.postgres.get_systemid())
        key_prefix = os.path.join(
            self.cloud_interface.path,
            self.server_name,
            'base',
            backup_info.backup_id
        )
        controller = S3UploadController(
            self.cloud_interface,
            key_prefix,
            self.max_archive_size,
            self.compression,
        )
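
Every object belonging to this base backup is therefore uploaded under a common prefix of the form <cloud_path>/<server_name>/base/<backup_id>. A sketch with illustrative values:

import os

# Hypothetical values following the pattern above: a cloud path, the
# server name and a timestamp-style backup id.
key_prefix = os.path.join('my-bucket-prefix', 'cloud', 'base',
                          '20240101T153000')
# key_prefix == 'my-bucket-prefix/cloud/base/20240101T153000'
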
From barman/retention_policies.py (2ndquadrant-it/barman):
def report(self, source=None, context=None):
        """Report obsolete/valid objects according to the retention policy"""
        if context is None:
            context = self.context
        # Overrides the list of available backups
        if source is None:
            source = self.server.get_available_backups(
                BackupInfo.STATUS_NOT_EMPTY)
        if context == 'BASE':
            return self._backup_report(source)
        elif context == 'WAL':
            return self._wal_report()
        else:
            raise ValueError('Invalid context %s' % context)
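
Callers choose the report flavour through context: 'BASE' classifies base backups, 'WAL' reports on WAL files, and anything else is rejected. A hypothetical invocation, where policy stands in for a configured retention policy object:

# 'policy' is an illustrative stand-in; for context='BASE' the
# report is a dict keyed by backup id, as the snippets below show.
status = policy.report(context='BASE')
for bid in sorted(status):
    print(bid, status[bid])
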
From barman/server.py (2ndquadrant-it/barman):
def check_backup(self, backup_info):
        """
        Make sure that we have all the WAL files required
        by a physical backup for consistency (from the
        first to the last WAL file)

        :param backup_info: the target backup
        """
        output.debug("Checking backup %s of server %s",
                     backup_info.backup_id, self.config.name)
        try:
            # No need to check a backup which is not waiting for WALs.
            # Checking it anyway could mark as DONE backups which
            # were previously FAILED due to copy errors
            if backup_info.status == BackupInfo.FAILED:
                output.error(
                    "The validity of a failed backup cannot be checked")
                return

            # Take care of the backup lock.
            # Only one process can modify a backup at a time
            with ServerBackupIdLock(self.config.barman_lock_directory,
                                    self.config.name,
                                    backup_info.backup_id):
                orig_status = backup_info.status
                self.backup_manager.check_backup(backup_info)
                if orig_status == backup_info.status:
                    output.debug(
                        "Check finished: the status of backup %s of server %s "
                        "remains %s",
                        backup_info.backup_id,
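
The lock is the important detail here: holding ServerBackupIdLock guarantees that only one process modifies the metadata of a given backup at a time. The usage pattern, with illustrative arguments and an assumed import path:

from barman.lockfile import ServerBackupIdLock  # assumed module path

backup_id = '20240101T153000'  # illustrative backup id
with ServerBackupIdLock('/var/lib/barman', 'test_server', backup_id):
    # Only the lock holder may read and update this backup's status;
    # the lock is released when the block exits, even on error.
    ...
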
From barman/retention_policies.py (2ndquadrant-it/barman):
def _backup_report(self, source):
        """Report obsolete/valid backups according to the retention policy"""
        report = dict()
        backups = source
        # Map as VALID all DONE backups having end time lower than
        # the point of recoverability. The older ones
        # are classified as OBSOLETE.
        # Non-DONE backups are classified as NONE
        found = False
        valid = 0
        # NOTE: reverse key orders (simulate reverse chronology)
        for bid in sorted(backups.keys(), reverse=True):
            # We are interested in DONE backups only
            if backups[bid].status == BackupInfo.DONE:
                if found:
                    # Check minimum redundancy requirements
                    if valid < self.server.config.minimum_redundancy:
                        _logger.warning(
                            "Keeping obsolete backup %s for server %s "
                            "(older than %s) "
                            "due to minimum redundancy requirements (%s)",
                            bid, self.server.config.name,
                            self._point_of_recoverability(),
                            self.server.config.minimum_redundancy)
                        # We mark the backup as potentially obsolete
                        # as we must respect minimum redundancy requirements
                        report[bid] = BackupInfo.POTENTIALLY_OBSOLETE
                        self._first_backup = bid
                        valid = valid + 1
                    else:
                        # We mark this backup as obsolete
                        # (older than the first valid one)
                        _logger.info(
                            "Reporting backup %s for server %s as OBSOLETE "
                            "(older than %s)",
                            bid, self.server.config.name,
                            self._point_of_recoverability())
                        report[bid] = BackupInfo.OBSOLETE
                else:
                    _logger.debug(
                        "Reporting backup %s for server %s as VALID "
                        "(newer than %s)",
                        bid, self.server.config.name,
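
A worked example makes the redundancy rule concrete: with minimum_redundancy = 2 and only one DONE backup inside the recovery window, the newest backup older than the window is kept as POTENTIALLY_OBSOLETE and only the ones after it become OBSOLETE. A standalone sketch of just that branch, with string stand-ins for the BackupInfo constants:

# Standalone rendering of the branch above; ids and the redundancy
# value are illustrative.
minimum_redundancy = 2
valid = 1            # DONE backups already counted inside the window
report = {}
for bid in ['20240201T000000', '20240101T000000']:  # newest first
    if valid < minimum_redundancy:
        report[bid] = 'POTENTIALLY_OBSOLETE'  # kept for redundancy
        valid += 1
    else:
        report[bid] = 'OBSOLETE'
# report == {'20240201T000000': 'POTENTIALLY_OBSOLETE',
#            '20240101T000000': 'OBSOLETE'}
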
From barman/backup.py (2ndquadrant-it/barman):
# prior to the start of the backup being deleted, as they
            # might be useful to any concurrent backup started immediately
            # after.
            remove_until = None  # means to remove all WAL files
            if next_backup:
                remove_until = next_backup
            elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options:
                remove_until = backup

            timelines_to_protect = set()
            # If remove_until is not set there are no backups left
            if remove_until:
                # Retrieve the list of extra timelines that contain at least
                # one backup. On such timelines we don't want to delete any WAL
                for value in self.get_available_backups(
                        BackupInfo.STATUS_ARCHIVING).values():
                    # Ignore the backup that is being deleted
                    if value == backup:
                        continue
                    timelines_to_protect.add(value.timeline)
                # Remove the timeline of `remove_until` from the list.
                # We have enough information to safely delete unused WAL files
                # on it.
                timelines_to_protect -= set([remove_until.timeline])

            output.info("Delete associated WAL segments:")
            for name in self.remove_wal_before_backup(remove_until,
                                                      timelines_to_protect):
                output.info("\t%s", name)
        # As the last action, remove the backup directory,
        # ending the delete operation
        try:
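
The timeline bookkeeping boils down to set arithmetic: protect every timeline that still hosts another backup, except the timeline of remove_until itself, which can safely be pruned up to that boundary. A sketch with made-up timeline numbers:

# Illustrative values: timelines of the surviving backups versus the
# timeline of the boundary backup.
surviving_timelines = {1, 2, 3}
remove_until_timeline = 2

timelines_to_protect = surviving_timelines - {remove_until_timeline}
# timelines_to_protect == {1, 3}: WALs on those timelines are kept,
# while timeline 2 is cleaned up to the boundary backup.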