# Round-trip tests for UnicodeCSVWriter / UnicodeCSVReader (Python 2). Only
# the test methods appeared in the original snippet; the imports and the
# TestCase wrapper below are assumed.
import unittest
from cStringIO import StringIO

from csvkit import unicsv


class TestUnicodeCSV(unittest.TestCase):

    def test_utf8(self):
        output = StringIO()
        writer = unicsv.UnicodeCSVWriter(output, encoding='utf-8')
        self.assertEqual(writer._eight_bit, True)
        writer.writerow(['a', 'b', 'c'])
        writer.writerow(['1', '2', '3'])
        writer.writerow(['4', '5', u'ʤ'])
        written = StringIO(output.getvalue())
        reader = unicsv.UnicodeCSVReader(written, encoding='utf-8')
        self.assertEqual(reader.next(), ['a', 'b', 'c'])
        self.assertEqual(reader.next(), ['1', '2', '3'])
        self.assertEqual(reader.next(), ['4', '5', u'ʤ'])

    def test_utf16_little(self):
        output = StringIO()
        writer = unicsv.UnicodeCSVWriter(output, encoding='utf-16-le')
        self.assertEqual(writer._eight_bit, False)
        writer.writerow(['a', 'b', 'c'])
        writer.writerow(['1', '2', '3'])
        writer.writerow(['4', '5', u'ʤ'])
        written = StringIO(output.getvalue())
        reader = unicsv.UnicodeCSVReader(written, encoding='utf-16-le')
        self.assertEqual(reader.next(), ['a', 'b', 'c'])
        self.assertEqual(reader.next(), ['1', '2', '3'])
        self.assertEqual(reader.next(), ['4', '5', u'\u02A4'])

    def test_utf16_big(self):
        output = StringIO()
        writer = unicsv.UnicodeCSVWriter(output, encoding='utf-16-be')
        self.assertEqual(writer._eight_bit, False)
        writer.writerow(['a', 'b', 'c'])
        writer.writerow(['1', '2', '3'])
        writer.writerow(['4', '5', u'ʤ'])
        written = StringIO(output.getvalue())
        reader = unicsv.UnicodeCSVReader(written, encoding='utf-16-be')
        self.assertEqual(reader.next(), ['a', 'b', 'c'])
        self.assertEqual(reader.next(), ['1', '2', '3'])
        self.assertEqual(reader.next(), ['4', '5', u'\u02A4'])
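

# A minimal round-trip sketch distilled from the tests above (Python 2; reuses
# the StringIO and unicsv imports at the top of this snippet):
def _roundtrip_demo():
    buf = StringIO()
    w = unicsv.UnicodeCSVWriter(buf, encoding='utf-8')
    w.writerow([u'name', u'glyph'])
    w.writerow([u'ezh', u'\u02A4'])
    r = unicsv.UnicodeCSVReader(StringIO(buf.getvalue()), encoding='utf-8')
    print r.next()  # [u'name', u'glyph']
    print r.next()  # [u'ezh', u'\u02a4']: cells decode back to unicode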

def compare_table_codes(a, b):
    # Sort census table codes by type, then number, then subtype. (Parsing of
    # a and b into those parts was truncated from the original snippet.)
    if a_type != b_type:
        return cmp(a_type, b_type)
    if a_number != b_number:
        return cmp(a_number, b_number)
    return cmp(a_subtype, b_subtype)
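
# For illustration: a comparator like this yields numeric ordering within a
# table family, e.g. ['P1', 'P2', 'P12', 'P12A'], where a plain lexicographic
# sorted() would give ['P1', 'P12', 'P12A', 'P2'].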

if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.exit('You must provide the filename for the CSV output as an argument to this script.')
    FILENAME = sys.argv[1]
    with open(FILENAME, "w") as f:
        collection = utils.get_label_collection()
        labelset = collection.find_one({'dataset': 'SF1'})
        w = UnicodeCSVWriter(f)
        w.writerow(['table_code', 'table_desc', 'table_universe', 'table_size',
                    'col_code', 'col_desc', 'indent', 'parent', 'has_children',
                    'col_code_2000'])
        for table_code in sorted(labelset['tables'], cmp=compare_table_codes):
            t = labelset['tables'][table_code]
            row_base = [table_code, t['name'], t['universe'], t['size']]
            for label_code in sorted(t['labels']):
                l = t['labels'][label_code]
                row = row_base[:]
                parent = '' if l['parent'] is None else l['parent']
                key_2000 = '' if l['key_2000'] is None else l['key_2000']
                row.extend([l['key'], l['text'], l['indent'], parent,
                            l['has_children'], key_2000])
                w.writerow(row)

def _init(conference, the_date, url):
    # Build the output filename from the conference name and two-digit year
    year_num = str(the_date.year)[2:]
    output_file = conference + year_num + "sched.csv"
    # Create the CSV file using csvkit's UnicodeCSVWriter and write the header row
    outfile = open(output_file, "w")
    w = UnicodeCSVWriter(outfile, delimiter=",", encoding="utf-8")
    w.writerow(['Topic', 'Subject', 'Start Date', 'Start Time', 'End Date',
                'End Time', 'All Day Event', 'Description', 'Location', 'Private'])
    private = False
    all_day = False
    # Use urllib2 to send a request to the URL and gather the HTML response
    response = urllib2.urlopen(url)
    html = response.read()
    # Parse the HTML using Beautiful Soup
    soup = BeautifulSoup(html)
    # The first day of the conference is a Wednesday: index 2, since the list
    # below starts counting at 0.
    day = 2
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',
            'Sunday']
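    # (Truncated in the original snippet: presumably the parsed schedule is
    # walked from here, writing one row per session with w.writerow.)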

def write_table_data(flo, state_fips, sumlev, table_id):
    """Given a file-like object, write a table to it."""
    w = UnicodeCSVWriter(flo)
    metadata = fetch_table_label(table_id)
    header = ['GEOID', 'SUMLEV'] + METADATA_HEADERS + ['POP100.2000', 'HU100.2000']
    for key in sorted(metadata['labels']):
        header.extend([key, "%s.2000" % key])
    w.writerow(header)
    query = {'sumlev': sumlev, 'metadata.STATE': state_fips}
    collection = utils.get_geography_collection()
    for geography in collection.find(query):
        row = [geography['geoid'], geography['sumlev']]
        for h in METADATA_HEADERS:
            row.append(geography['metadata'][h])
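        # (Truncated in the original snippet: presumably the per-label values
        # are appended here before the row is written with w.writerow(row).)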

# This __init__ belongs to a DictWriter-style class; the class name below is
# an assumption, mirroring csv.DictWriter's interface.
class UnicodeCSVDictWriter(object):

    def __init__(self, f, fieldnames, writeheader=False, restval="",
                 extrasaction="raise", *args, **kwds):
        self.fieldnames = fieldnames
        self.restval = restval
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError(
                "extrasaction (%s) must be 'raise' or 'ignore'" % extrasaction)
        self.extrasaction = extrasaction
        self.writer = UnicodeCSVWriter(f, *args, **kwds)
        if writeheader:
            self.writerow(dict(zip(self.fieldnames, self.fieldnames)))
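
# Usage sketch for the dict writer above (the file and field names are
# hypothetical; 'wb' matches Python 2 csv file handling):
def _dictwriter_demo():
    with open('people.csv', 'wb') as f:
        w = UnicodeCSVDictWriter(f, fieldnames=['name', 'city'], writeheader=True)
        w.writerow({'name': u'J\u00e9r\u00f4me', 'city': u'Montr\u00e9al'})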

def writeCSV(fpath, output):
    with open(fpath, 'wb') as f:
        writer = UnicodeCSVWriter(f)
        writer.writerows(output)
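
# Example call (data is hypothetical): `output` must be an iterable of rows,
# since writerows() writes one row per element.
writeCSV('glyphs.csv', [[u'ezh', u'\u02A4'], [u'eth', u'\u00f0']])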