How to use the tabulator.topen function in tabulator

To help you get started, we've selected a few tabulator examples based on common ways topen is used in public projects. Each snippet assumes that topen has been imported from the tabulator package (from tabulator import topen), as the final example shows explicitly.
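
Before diving into the individual examples, here is a minimal sketch of the pattern most of them rely on. Everything in it (opening a local CSV with topen as a context manager, headers='row1', table.headers, and iter(keyed=True)) appears in the tests below; the file path is simply a placeholder mirroring the test fixtures.

from tabulator import topen

# Open a local CSV file, treating its first row as the header row.
with topen('data/table.csv', headers='row1') as table:
    print(table.headers)                # ['id', 'name']
    for row in table.iter(keyed=True):
        print(row)                      # {'id': '1', 'name': 'english'} ...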


Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_local_csv_parser_options():

    # Get table
    table = topen('data/table.csv',
            parser_options={'constructor': CSVParser})

    # Make assertions
    assert table.headers is None
    assert table.read() == [['id', 'name'], ['1', 'english'], ['2', '中国人']]

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_headers_user_set():

    # Get table
    source = [['1', 'english'], ['2', '中国人']]
    table = topen(source, headers=['id', 'name'])

    # Make assertions
    assert table.headers == ['id', 'name']
    assert list(table.iter(keyed=True)) == [
        {'id': '1', 'name': 'english'},
        {'id': '2', 'name': '中国人'}]

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_headers_with_headers_argument():

    # Get table
    table = topen('data/table.csv', with_headers=True)

    # Make assertions
    assert table.headers == ['id', 'name']
    assert list(table.iter(keyed=True)) == [
        {'id': '1', 'name': 'english'},
        {'id': '2', 'name': '中国人'}]

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_reset():

    # Get results
    with topen('data/table.csv', headers='row1') as table:
        headers1 = table.headers
        contents1 = table.read()
        table.reset()
        headers2 = table.headers
        contents2 = table.read()

    # Make assertions
    assert headers1 == ['id', 'name']
    assert contents1 == [['1', 'english'], ['2', '中国人']]
    assert headers1 == headers2
    assert contents1 == contents2

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_local_csv():

    # Get table
    table = topen('data/table.csv')

    # Make assertions
    assert table.headers is None
    assert table.read() == [['id', 'name'], ['1', 'english'], ['2', '中国人']]

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_headers_json_keyed():

    # Get table
    source = ('text://['
        '{"id": 1, "name": "english"},'
        '{"id": 2, "name": "中国人"}]')
    table = topen(source, headers='row1', format='json')

    # Make assertions
    assert table.headers == ['id', 'name']
    assert list(table.iter(keyed=True)) == [
        {'id': 1, 'name': 'english'},
        {'id': 2, 'name': '中国人'}]
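
The text:// scheme used above also works with other parsers. As an illustrative sketch (this exact inline CSV string is invented for the example, but the text:// scheme, the format argument, and headers='row1' all appear in the tests on this page), an explicit format hint tells topen how to parse a source whose format cannot be inferred from a file name:

from tabulator import topen

# Inline text source parsed as CSV; headers='row1' promotes the first row to headers.
source = 'text://id,name\n1,english\n2,中国人'
table = topen(source, headers='row1', format='csv')
assert table.headers == ['id', 'name']
assert table.read() == [['1', 'english'], ['2', '中国人']]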

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_inline_iterator():

    # Get table
    source = iter([['id', 'name'], ['1', 'english'], ['2', '中国人']])
    table = topen(source)

    # Make assertions
    assert table.headers is None
    assert table.read() == [['id', 'name'], ['1', 'english'], ['2', '中国人']]

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_remote_json_lists():

    # Get table
    table = topen(BASE_URL % 'data/table-lists.json')

    # Make assertions
    assert table.headers is None
    assert table.read() == [['id', 'name'], [1, 'english'], [2, '中国人']]

Example from frictionlessdata/tabulator-py, tests/test_topen.py:
def test_sample():

    # Get table
    source = [['id', 'name'], ['1', 'english'], ['2', '中国人']]
    table = topen(source, headers='row1')

    # Make assertions
    assert table.headers == ['id', 'name']
    assert table.sample == [['1', 'english'], ['2', '中国人']]

Example from frictionlessdata/tableschema-sql-py, examples/storage.py:
import os
import io
import json
from tabulator import topen
from sqlalchemy import create_engine
from dotenv import load_dotenv; load_dotenv('.env')

from tableschema_sql import Storage


# Get resources
articles_schema = json.load(io.open('data/articles.json', encoding='utf-8'))
comments_schema = json.load(io.open('data/comments.json', encoding='utf-8'))
articles_data = topen('data/articles.csv', with_headers=True).read()
comments_data = topen('data/comments.csv', with_headers=True).read()

# Engine
engine = create_engine(os.environ['POSTGRES_URL'])

# Storage
storage = Storage(engine=engine, prefix='prefix_')

# Delete tables
for table in reversed(storage.tables):
    storage.delete(table)

# Create tables
storage.create(['articles', 'comments'], [articles_schema, comments_schema])

# Write data to tables
storage.write('articles', articles_data)
storage.write('comments', comments_data)