How to use the tablib.Dataset class in tablib

To help you get started, we’ve selected a few tablib examples, based on popular ways it is used in public projects.
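
Before the project examples, here is a minimal sketch of the basic tablib.Dataset workflow (the column names and rows are invented for illustration):

import tablib

# Build a dataset: headers up front, one row per append.
data = tablib.Dataset(headers=['first_name', 'last_name', 'age'])
data.append(('John', 'Adams', 90))
data.append(('George', 'Washington', 67))

# Export to any registered format; csv and json ship with tablib,
# while others (e.g. xlsx) need extras such as "tablib[xlsx]".
print(data.csv)
print(data.export('json'))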


github cloudera / hue / desktop / core / ext-py / tablib-develop / test_tablib.py
def test_databook_add_sheet_accepts_dataset_subclasses(self):
        class DatasetSubclass(tablib.Dataset):
            pass

        # just checking if subclass of tablib.Dataset can be added to Databook
        dataset = DatasetSubclass()
        dataset.append(self.john)
        dataset.append(self.tom)

        # `book` is defined at module level in the original test file;
        # create one here so the excerpt stands alone.
        book = tablib.Databook()
        try:
            book.add_sheet(dataset)
        except tablib.InvalidDatasetType:
            self.fail("Subclass of tablib.Dataset should be accepted by Databook.add_sheet")
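
As a follow-up, a Databook can also be seeded with datasets at construction time and exported as a multi-sheet workbook. A brief sketch (the sheet titles and filename are invented; xlsx export assumes the openpyxl-backed extra is installed):

import tablib

people = tablib.Dataset(title='People')
places = tablib.Dataset(title='Places')
book = tablib.Databook((people, places))

# Multi-sheet export; requires `pip install "tablib[xlsx]"`.
with open('workbook.xlsx', 'wb') as f:
    f.write(book.export('xlsx'))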
github jazzband / tablib / tests / test_tablib.py
def test_csv_column_delete(self):
        """Build up a CSV and test deleting a column"""

        data = tablib.Dataset()
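        # Assigning CSV text to the .csv property re-imports it, replacing
        # the dataset's rows and headers.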
        data.csv = self.founders.csv

        target_header = data.headers[0]
        self.assertTrue(isinstance(target_header, str))

        del data[target_header]

        self.assertTrue(target_header not in data.headers)
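
The deletion hook used above is type-sensitive: a string key deletes a column, while an integer index deletes a row. A quick sketch with invented data:

data = tablib.Dataset(('a', 1), ('b', 2), headers=['letter', 'number'])
del data['number']   # string key: drops the column
del data[0]          # integer index: drops the first row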
github jazzband / tablib / tests / test_tablib.py
def test_latex_export_none_values(self):
        headers = ['foo', None, 'bar']
        d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
        output = d.latex
        self.assertTrue('foo' in output)
        self.assertFalse('None' in output)
github adamcharnock / swiftwind / swiftwind / transactions / tests.py
def test_error_both_amounts(self):
        dataset = tablib.Dataset(
            ['15/6/2016', '5.10', '1.20', 'Example payment'],
            headers=['date', 'amount_in', 'amount_out', 'description']
        )
        result = self.makeResource().import_data(dataset)
        self.assertEqual(len(result.row_errors()), 1)
        self.assertIn('Values found for both', str(result.row_errors()[0][1][0].error))
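
For context, makeResource() in this test returns a django-import-export resource. The sketch below is a hypothetical stand-in, not swiftwind's actual class; it only illustrates where the "Values found for both" validation would live:

from import_export import resources

class TransactionResource(resources.Resource):
    # Hypothetical resource; swiftwind's real class and fields differ.
    class Meta:
        fields = ('date', 'amount_in', 'amount_out', 'description')

    def import_obj(self, obj, data, dry_run):
        # Errors raised here are collected into result.row_errors().
        if data.get('amount_in') and data.get('amount_out'):
            raise ValueError('Values found for both amount_in and amount_out')
        return super().import_obj(obj, data, dry_run)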
github nditech / apollo / apollo / submissions / views_submissions.py
"""
    form = services.forms.get_or_404(pk=form_pk, form_type='INCIDENT')
    location_type = services.location_types.objects.get_or_404(
        pk=location_type_pk)
    if location_pk:
        location = services.locations.get_or_404(pk=location_pk)
        qs = services.submissions.find(submission_type='O', form=form) \
            .filter_in(location)
    else:
        qs = services.submissions.find(submission_type='O', form=form)

    event = get_event()
    tags = [fi.name for group in form.groups for fi in group.fields]
    qs = qs(created__lte=event.end_date, created__gte=event.start_date)
    df = qs.dataframe()
    ds = Dataset()
    ds.headers = ['LOC'] + tags + ['TOT']

    for summary in incidents_csv(df, location_type.name, tags):
        ds.append([summary.get(heading) for heading in ds.headers])

    return ds.csv
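
Note that ds.csv here is the export getter, returning the dataset rendered as CSV text; the equivalent explicit spelling is ds.export('csv').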
github olivierfriard / BORIS / boris / project_functions.py
def export_observations_list(pj: dict,
                             selected_observations: list,
                             file_name: str,
                             output_format: str) -> bool:
    """
    create file with a list of selected observations

    Args:
        pj (dict): project dictionary
        selected_observations (list): list of observations to export
        file_name (str): path of file to save list of observations
        output_format (str): format output

    Returns:
        bool: True if OK else False
    """

    data = tablib.Dataset()
    data.headers = ["Observation id", "Date", "Description", "Subjects", "Media files/Live observation"]

    indep_var_header = []
    if INDEPENDENT_VARIABLES in pj:
        for idx in utilities.sorted_keys(pj[INDEPENDENT_VARIABLES]):
            indep_var_header.append(pj[INDEPENDENT_VARIABLES][idx]["label"])
    data.headers.extend(indep_var_header)

    for obs_id in selected_observations:

        subjects_list = sorted(list(set([x[EVENT_SUBJECT_FIELD_IDX] for x in pj[OBSERVATIONS][obs_id][EVENTS]])))
        if "" in subjects_list:
            subjects_list = [NO_FOCAL_SUBJECT] + subjects_list
            subjects_list.remove("")
        subjects = ", ".join(subjects_list)
github giantoak / unicorn / app / bulk.py
def bulk_search(queries):
    """

    :param list queries: List of elasticsearch queries
    :return tablib.Dataset:
    """
    data = tablib.Dataset(headers=['filename', 'id', 'query'])
    for q in queries:
        r = es.search(q=q, fields=['title'], size=100, index=es_index,
                      doc_type="attachment")
        for res in r['hits']['hits']:
            title = res['fields']['title'][0]
            _id = res['_id']
            
            data.append((title, _id, q))

    return data
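
A hypothetical usage sketch for the returned dataset (the query strings and filename are invented; xlsx export needs the tablib[xlsx] extra):

results = bulk_search(['error', 'timeout'])
with open('hits.xlsx', 'wb') as f:
    f.write(results.export('xlsx'))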
github tlambert03 / FPbase / proteins / extrest / tables.py
def table2dataset(table):
    if isinstance(table, str):
        table = BeautifulSoup(table, "lxml").find_all("table")[0]
    data = tablib.Dataset()
    # data.headers = [head.text for head in table.find('thead').find_all('th')]
    headings = first_row(table)
    data.headers = headings
    for row in table.find("tbody").find_all("tr"):
        data.append(tuple([td.text.strip() for td in row.find_all("td")]))
    return data
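
A sketch of calling table2dataset on raw HTML (the markup is invented, and first_row below is a stand-in for the module's own helper, assumed to return the cell texts of the table's first row):

from bs4 import BeautifulSoup
import tablib

# Stand-in for the module's first_row helper; the real one may differ.
def first_row(table):
    return [cell.get_text(strip=True)
            for cell in table.find('tr').find_all(['th', 'td'])]

html = """
<table>
  <thead><tr><th>name</th><th>ex_max</th></tr></thead>
  <tbody>
    <tr><td>GFP</td><td>488</td></tr>
    <tr><td>mCherry</td><td>587</td></tr>
  </tbody>
</table>
"""
print(table2dataset(html).csv)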
github propublica / django-collaborative / collaborative / export.py
def export(self, queryset=None, *args, **kwargs):
        """
        Exports a resource and handles reverse FK relationships.
        """

        self.before_export(queryset, *args, **kwargs)

        if queryset is None:
            queryset = self.get_queryset()
        headers = self.get_export_headers()
        self.add_reverse_fk_headers(headers)
        data = tablib.Dataset(headers=headers)

        if isinstance(queryset, QuerySet):
            # Iterate without the queryset cache, to avoid wasting memory when
            # exporting large datasets.
            iterable = queryset.iterator()
        else:
            iterable = queryset
        for obj in iterable:
            export_resource = self.export_resource(obj)
            self.add_reverse_fk_values(export_resource, obj)
            data.append(export_resource)

        self.after_export(queryset, data, *args, **kwargs)

        return data
github CityOfNewYork / NYCOpenRecords / app / report / utils.py
    total_closed = Requests.query.with_entities(
        Requests.id,
        Requests.status,
        func.to_char(Requests.date_created, 'MM/DD/YYYY'),
        func.to_char(Requests.date_closed, 'MM/DD/YYYY'),
        func.to_char(Requests.due_date, 'MM/DD/YYYY'),
    ).filter(
        Requests.agency_ein == agency_ein,
        Requests.status == CLOSED,
    ).order_by(asc(Requests.date_created)).all()
    total_closed_headers = ('Request ID',
                            'Status',
                            'Date Created',
                            'Date Closed',
                            'Due Date')
    total_closed_dataset = tablib.Dataset(*total_closed,
                                          headers=total_closed_headers,
                                          title='All Closed requests')

    # Query for all requests that have been opened and closed in the same given month.
    opened_closed_in_month = Requests.query.with_entities(
        Requests.id,
        Requests.status,
        func.to_char(Requests.date_created, 'MM/DD/YYYY'),
        func.to_char(Requests.due_date, 'MM/DD/YYYY'),
    ).filter(
        Requests.date_created.between(date_from_utc, date_to_utc),
        Requests.agency_ein == agency_ein,
        Requests.status == CLOSED,
    ).order_by(asc(Requests.date_created)).all()
    opened_closed_in_month_headers = ('Request ID',
                                      'Status',