How to use the archivebox.legacy.index.load_main_index function in archivebox

To help you get started, we’ve selected a few archivebox examples based on popular ways this function is used in public projects.

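All of the examples below share the same basic pattern: point load_main_index at an archive data folder and iterate over the Link entries it returns. Here is a minimal sketch of that pattern, assuming OUTPUT_DIR is exposed by archivebox.legacy.config (as in the snippets below) and points at an already-initialized archive folder.

# Minimal usage sketch -- the OUTPUT_DIR import is an assumption based on the snippets below.
from archivebox.legacy.config import OUTPUT_DIR
from archivebox.legacy.index import load_main_index

# Load every Link recorded in the main index of an initialized archive folder.
all_links = load_main_index(out_dir=OUTPUT_DIR)

print(f'Loaded {len(all_links)} links from the main index')
for link in all_links:
    # Each entry exposes the fields used throughout the examples: url, timestamp, link_dir, ...
    print(link.timestamp, link.url)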

github pirate / ArchiveBox / archivebox / tests.py
def test_conflicting_init(self):
        with open(os.path.join(OUTPUT_DIR, 'test_conflict.txt'), 'w+') as f:
            f.write('test')

        try:
            with output_hidden(show_failing=False):
                archivebox_init.main([])
            assert False, 'Init should have exited with an exception'
        except SystemExit:
            pass

        assert not os.path.exists(os.path.join(OUTPUT_DIR, SQL_INDEX_FILENAME))
        assert not os.path.exists(os.path.join(OUTPUT_DIR, JSON_INDEX_FILENAME))
        assert not os.path.exists(os.path.join(OUTPUT_DIR, HTML_INDEX_FILENAME))
        try:
            load_main_index(out_dir=OUTPUT_DIR)
        except:  # noqa: E722 -- any exception type counts as the expected failure here
            pass
        else:
            assert False, 'load_main_index should raise an exception when no index is present'
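
The test above relies on load_main_index failing loudly when no index exists yet. In your own scripts you may want to guard for that case; below is a defensive sketch (the exact exception type raised for a missing index isn't pinned down by the snippet, so the handler is deliberately broad).

# Defensive-loading sketch -- falls back to an empty list if the folder has no usable index.
from archivebox.legacy.config import OUTPUT_DIR
from archivebox.legacy.index import load_main_index

try:
    all_links = list(load_main_index(out_dir=OUTPUT_DIR))
except (SystemExit, Exception):  # exact error type for a missing index is not guaranteed
    print(f'No usable main index found in {OUTPUT_DIR}; run archivebox init first.')
    all_links = []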

github pirate / ArchiveBox / archivebox / tests.py
def test_remove_regex(self):
        with output_hidden():
            archivebox_remove.main(['--yes', '--delete', '--filter-type=regex', 'http(s)?:\/\/(.+\.)?(example\d\.com)'])

        all_links = load_main_index(out_dir=OUTPUT_DIR)
        assert len(all_links) == 4

github pirate / ArchiveBox / archivebox / tests.py
def test_add_stdin_url(self):
        with output_hidden():
            archivebox_add.main([], stdin=test_urls)

        all_links = load_main_index(out_dir=OUTPUT_DIR)
        assert len(all_links) == 12

github pirate / ArchiveBox / archivebox / tests.py
def test_remove_exact(self):
        with output_hidden():
            archivebox_remove.main(['--yes', '--delete', 'https://example5.com/'])

        all_links = load_main_index(out_dir=OUTPUT_DIR)
        assert len(all_links) == 11

github pirate / ArchiveBox / archivebox / legacy / main.py
try:
        for idx, link in enumerate(links_after_timestamp(links, resume)):
            archive_link(link, out_dir=link.link_dir)

    except KeyboardInterrupt:
        log_archiving_paused(len(links), idx, link.timestamp if link else '0')
        raise SystemExit(0)

    except:
        print()
        raise    

    log_archiving_finished(len(links))

    # Step 4: Re-write links index with updated titles, icons, and resources
    all_links = load_main_index(out_dir=OUTPUT_DIR)
    write_main_index(links=list(all_links), out_dir=OUTPUT_DIR, finished=True)
    return all_links
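
The last two lines above show the load-then-rewrite step that several of these examples share: reload the main index and write it back out so updated titles, icons, and resources are persisted. A standalone sketch of that round trip, assuming write_main_index is importable from the same archivebox.legacy.index module:

# Round-trip sketch: reload the main index, then write it straight back to disk.
# The write_main_index import location is an assumption; it mirrors the calls in the snippet above.
from archivebox.legacy.config import OUTPUT_DIR
from archivebox.legacy.index import load_main_index, write_main_index

all_links = load_main_index(out_dir=OUTPUT_DIR)
write_main_index(links=list(all_links), out_dir=OUTPUT_DIR, finished=True)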

github pirate / ArchiveBox / archivebox / legacy / main.py
))
    finally:
        timer.end()

    if not len(links):
        log_removal_finished(0, 0)
        raise SystemExit(1)


    log_list_finished(links)
    log_removal_started(links, yes=yes, delete=delete)

    timer = TimedProgress(360, prefix='      ')
    try:
        to_keep = []
        all_links = load_main_index(out_dir=OUTPUT_DIR)
        for link in all_links:
            should_remove = (
                (after is not None and float(link.timestamp) < after)
                or (before is not None and float(link.timestamp) > before)
                or link_matches_filter(link, filter_patterns, filter_type)
            )
            if not should_remove:
                to_keep.append(link)
            elif should_remove and delete:
                shutil.rmtree(link.link_dir)
    finally:
        timer.end()

    write_main_index(links=to_keep, out_dir=OUTPUT_DIR, finished=True)
    log_removal_finished(len(all_links), len(to_keep))

github pirate / ArchiveBox / archivebox / legacy / main.py
assert os.path.exists(settings.DATABASE_FILE)
    
    # from django.contrib.auth.models import User
    # if IS_TTY and not User.objects.filter(is_superuser=True).exists():
    #     print('{green}[+] Creating admin user account...{reset}'.format(**ANSI))
    #     call_command("createsuperuser", interactive=True)

    print()
    print('{green}[*] Collecting links from any existing index or archive folders...{reset}'.format(**ANSI))

    all_links = {}
    if existing_index:
        all_links = {
            link.url: link
            for link in load_main_index(out_dir=OUTPUT_DIR, warn=False)
        }
        print('    √ Loaded {} links from existing main index...'.format(len(all_links)))

    orphaned_json_links = {
        link.url: link
        for link in parse_json_main_index(OUTPUT_DIR)
        if link.url not in all_links
    }
    if orphaned_json_links:
        all_links.update(orphaned_json_links)
        print('    {lightyellow}√ Added {} orphaned links from existing JSON index...{reset}'.format(len(orphaned_json_links), **ANSI))

    orphaned_sql_links = {
        link.url: link
        for link in parse_sql_main_index(OUTPUT_DIR)
        if link.url not in all_links

github pirate / ArchiveBox / archivebox / legacy / main.py
def info():

    print('{green}[*] Scanning archive collection main index...{reset}'.format(**ANSI))
    print(f'    {OUTPUT_DIR}/*')
    num_bytes, num_dirs, num_files = get_dir_size(OUTPUT_DIR, recursive=False, pattern='index.')
    size = human_readable_size(num_bytes)
    print(f'    Size: {size} across {num_files} files')
    print()

    links = list(load_main_index(out_dir=OUTPUT_DIR))
    num_json_links = len(links)
    num_sql_links = sum(1 for link in parse_sql_main_index(out_dir=OUTPUT_DIR))
    num_html_links = sum(1 for url in parse_html_main_index(out_dir=OUTPUT_DIR))
    num_link_details = sum(1 for link in parse_json_links_details(out_dir=OUTPUT_DIR))
    users = get_admins().values_list('username', flat=True)
    print(f'    > JSON Main Index: {num_json_links} links'.ljust(36),  f'(found in {JSON_INDEX_FILENAME})')
    print(f'    > SQL Main Index: {num_sql_links} links'.ljust(36), f'(found in {SQL_INDEX_FILENAME})')
    print(f'    > HTML Main Index: {num_html_links} links'.ljust(36), f'(found in {HTML_INDEX_FILENAME})')
    print(f'    > JSON Link Details: {num_link_details} links'.ljust(36), f'(found in {ARCHIVE_DIR_NAME}/*/index.json)')

    print(f'    > Admin: {len(users)} users {", ".join(users)}'.ljust(36), f'(found in {SQL_INDEX_FILENAME})')
    
    if num_html_links != len(links) or num_sql_links != len(links):
        print()
        print('    {lightred}Hint:{reset} You can fix index count differences automatically by running:'.format(**ANSI))
        print('        archivebox init')

github pirate / ArchiveBox / archivebox / legacy / main.py
def update_archive_data(import_path: Optional[str]=None, 
                        resume: Optional[float]=None,
                        only_new: bool=False,
                        index_only: bool=False) -> List[Link]:
    """The main ArchiveBox entrancepoint. Everything starts here."""

    check_dependencies()
    check_data_folder()

    # Step 1: Load list of links from the existing index
    #         merge in and dedupe new links from import_path
    all_links: List[Link] = []
    new_links: List[Link] = []
    all_links = load_main_index(out_dir=OUTPUT_DIR)
    if import_path:
        all_links, new_links = import_new_links(all_links, import_path)

    # Step 2: Write updated index with deduped old and new links back to disk
    write_main_index(links=list(all_links), out_dir=OUTPUT_DIR)

    if index_only:
        return all_links
        
    # Step 3: Run the archive methods for each link
    links = new_links if ONLY_NEW else all_links
    log_archiving_started(len(links), resume)
    idx: int = 0
    link: Link = None                                             # type: ignore
    try:
        for idx, link in enumerate(links_after_timestamp(links, resume)):