How to use the agate.csv.writer function in agate

To help you get started, we’ve selected a few agate examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github wireservice / agate / tests / test_agate.py View on GitHub external
def test_agate(self):
    """Verify agate.csv re-exports the CSV implementation matching the Python version.

    On Python 2 the names must be the agate.csv_py2 objects; on Python 3,
    the agate.csv_py3 objects. Identity (not mere equality) is asserted.
    """
    backend = agate.csv_py2 if six.PY2 else agate.csv_py3
    for attr in ('reader', 'writer', 'DictReader', 'DictWriter'):
        self.assertIs(getattr(agate.csv, attr), getattr(backend, attr))
github wireservice / csvkit / csvkit / utilities / csvsql.py View on GitHub external
# NOTE(review): fragment of csvkit's csvsql — the enclosing function and the
# `if` matching the `else:` below are outside this excerpt; indentation of the
# first line was flattened by extraction.
with open(self.args.query, 'r') as f:
                        query = f.read()
                else:
                    # Query was given inline on the command line, not as a file.
                    query = self.args.query

                # Execute the specified SQL queries.
                queries = query.split(';')
                rows = None

                for q in queries:
                    if q.strip():
                        rows = self.connection.execute(q)

                # Output the result of the last query as CSV
                # NOTE(review): if every split query is blank, `rows` remains
                # None and the attribute access below raises AttributeError —
                # confirm upstream guarantees a non-empty query string.
                if rows.returns_rows:
                    output = agate.csv.writer(self.output_file, **self.writer_kwargs)
                    # `rows._metadata.keys` is private SQLAlchemy API —
                    # presumably the result column names; verify against the
                    # SQLAlchemy version in use.
                    output.writerow(rows._metadata.keys)
                    for row in rows:
                        output.writerow(row)

            transaction.commit()
github wireservice / csvkit / csvkit / utilities / csvstack.py View on GitHub external
# NOTE(review): fragment of csvkit's csvstack main() — the def line and the
# tail of the loop body are outside this excerpt.
# Warn interactively when no input was supplied and we are about to block on stdin.
if sys.stdin.isatty() and not self.args.input_paths:
            sys.stderr.write('No input file or piped data provided. Waiting for standard input:\n')

        # A grouping column is added either from explicit --groups values or
        # from the input filenames.
        has_groups = self.args.groups is not None or self.args.group_by_filenames

        if self.args.groups is not None and not self.args.group_by_filenames:
            groups = self.args.groups.split(',')

            # One grouping value is required per stacked input file.
            if len(groups) != len(self.args.input_paths):
                self.argparser.error('The number of grouping values must be equal to the number of CSV files being stacked.')
        else:
            # Either no grouping at all, or grouping by filename (handled later).
            groups = None

        group_name = self.args.group_name if self.args.group_name else 'group'

        output = agate.csv.writer(self.output_file, **self.writer_kwargs)

        for i, path in enumerate(self.args.input_paths):
            f = self._open_input_file(path)

            # Consume --skip-lines leading lines before handing the stream to
            # the CSV reader.
            if isinstance(self.args.skip_lines, int):
                skip_lines = self.args.skip_lines
                while skip_lines > 0:
                    f.readline()
                    skip_lines -= 1
            else:
                raise ValueError('skip_lines argument must be an int')

            rows = agate.csv.reader(f, **self.reader_kwargs)

            if has_groups:
                if groups:
github wireservice / csvkit / csvkit / utilities / csvcut.py View on GitHub external
def main(self):
    """Write only the selected columns of the input CSV to the output file.

    With --names, just print the column names and exit. Rows shorter than
    the highest selected column index are padded with None; with
    --delete-empty-rows, rows whose selected cells are all falsy are dropped.
    """
    if self.args.names_only:
        self.print_column_names()
        return

    if self.additional_input_expected():
        sys.stderr.write('No input file or piped data provided. Waiting for standard input:\n')

    rows, column_names, column_ids = self.get_rows_and_column_names_and_column_ids(**self.reader_kwargs)

    writer = agate.csv.writer(self.output_file, **self.writer_kwargs)

    # Header row: the names of the selected columns, in selection order.
    header = [column_names[cid] for cid in column_ids]
    writer.writerow(header)

    for row in rows:
        width = len(row)
        out_row = []
        for cid in column_ids:
            # Pad with None when the source row is too short for this column.
            out_row.append(row[cid] if cid < width else None)

        if any(out_row) or not self.args.delete_empty:
            writer.writerow(out_row)
github wireservice / csvkit / csvkit / utilities / sql2csv.py View on GitHub external
# NOTE(review): fragment of csvkit's sql2csv — the enclosing function is
# outside this excerpt; the first line's indentation was flattened by extraction.
connection = engine.connect()

        # The query comes either from the command line or from the input file/stdin.
        if self.args.query:
            query = self.args.query.strip()
        else:
            query = ""

            self.input_file = self._open_input_file(self.args.input_path)

            # NOTE(review): building the query with += per line is quadratic in
            # the worst case; ''.join(self.input_file) would be the idiomatic
            # form — behavior is otherwise identical.
            for line in self.input_file:
                query += line

            self.input_file.close()

        # no_parameters=True: send the SQL verbatim, without bind-parameter
        # processing.
        rows = connection.execution_options(no_parameters=True).execute(query)
        output = agate.csv.writer(self.output_file, **self.writer_kwargs)

        if rows.returns_rows:
            if not self.args.no_header_row:
                # `rows._metadata.keys` is private SQLAlchemy API — presumably
                # the result column names; verify against the SQLAlchemy
                # version in use.
                output.writerow(rows._metadata.keys)

            for row in rows:
                output.writerow(row)

        connection.close()
github wireservice / csvkit / csvkit / utilities / csvformat.py View on GitHub external
def main(self):
    """Re-emit the input CSV using the output formatting options.

    Reads with the reader options, writes with the writer options —
    effectively a CSV dialect converter.
    """
    if self.additional_input_expected():
        sys.stderr.write('No input file or piped data provided. Waiting for standard input:\n')

    source = agate.csv.reader(self.skip_lines(), **self.reader_kwargs)
    sink = agate.csv.writer(self.output_file, **self.writer_kwargs)

    for record in source:
        sink.writerow(record)
github wireservice / csvkit / csvkit / convert / geojs.py View on GitHub external
# NOTE(review): fragment of csvkit's GeoJSON-to-CSV converter — the enclosing
# function and the loop over features are outside this excerpt; the first
# line's indentation was flattened by extraction.
geometry = feature['geometry']
        geometry_type = geometry.get('type')
        # Only Point geometries yield scalar coordinates; other types get
        # None/None and are represented solely by the serialized geometry.
        if geometry_type == 'Point':
            longitude, latitude = geometry['coordinates']
        else:
            longitude, latitude = (None, None)

        features_parsed.append((feature.get('id'), properties, json.dumps(geometry), geometry_type, longitude, latitude))

    # Header: id, then every property field, then the geometry columns.
    header = ['id']
    header.extend(property_fields)
    header.extend(('geojson', 'type', 'longitude', 'latitude'))

    # Write the CSV into an in-memory buffer (presumably returned by the
    # enclosing function — not visible here).
    o = six.StringIO()
    writer = agate.csv.writer(o)

    writer.writerow(header)

    for geoid, properties, geometry, geometry_type, longitude, latitude in features_parsed:
        row = [geoid]

        for field in property_fields:
            value = properties.get(field)
            # Nested objects are serialized as JSON so they fit a single cell.
            if isinstance(value, OrderedDict):
                value = json.dumps(value)
            row.append(value)

        row.extend((geometry, geometry_type, longitude, latitude))

        writer.writerow(row)
github wireservice / csvkit / csvkit / utilities / csvstat.py View on GitHub external
# NOTE(review): this method is truncated by the excerpt — the body of the
# inner OPERATIONS loop continues past the last visible line.
def print_csv(self, table, column_ids, stats):
        """
        Print data for all statistics as a csv table.
        """
        writer = agate.csv.writer(self.output_file)

        # One column per statistic, in OPERATIONS declaration order.
        header = ['column_id', 'column_name'] + [op_name for op_name in OPERATIONS.keys()]

        writer.writerow(header)

        for column_id in column_ids:
            column_name = table.column_names[column_id]
            column_stats = stats[column_id]

            # column_id is zero-based internally; report it one-based.
            output_row = [column_id + 1, column_name]

            for op_name, op_data in OPERATIONS.items():
                # A statistic that could not be computed is emitted as None
                # (an empty CSV cell).
                if column_stats[op_name] is None:
                    output_row.append(None)
                    continue
github wireservice / csvkit / csvkit / utilities / csvgrep.py View on GitHub external
# NOTE(review): fragment of csvkit's csvgrep main() — the enclosing function is
# outside this excerpt; the first line's indentation was flattened by extraction.
rows, column_names, column_ids = self.get_rows_and_column_names_and_column_ids(**reader_kwargs)

        # `pattern` ends up as one of three shapes that FilteringCSVReader
        # accepts: a compiled regex, a callable, or a plain substring/string.
        if self.args.regex:
            pattern = re.compile(self.args.regex)
        elif self.args.matchfile:
            # Match against the set of (rstripped) lines from the match file.
            lines = set(line.rstrip() for line in self.args.matchfile)

            def pattern(x):
                return x in lines
        else:
            pattern = self.args.pattern

        # Apply the same pattern to every selected column.
        patterns = dict((column_id, pattern) for column_id in column_ids)
        filter_reader = FilteringCSVReader(rows, header=False, patterns=patterns, inverse=self.args.inverse)

        output = agate.csv.writer(self.output_file, **writer_kwargs)
        output.writerow(column_names)

        for row in filter_reader:
            output.writerow(row)