How to use the xlrd.open_workbook function in xlrd

To help you get started, we’ve selected a few xlrd.open_workbook examples, based on popular ways it is used in public projects.

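Before the project examples, here is a minimal sketch of the basic call. It opens a workbook from a path and prints every row of the first sheet; the file name example.xls is a placeholder.

import xlrd

book = xlrd.open_workbook("example.xls")  # placeholder path
sheet = book.sheet_by_index(0)
for rownum in range(sheet.nrows):
    print(sheet.row_values(rownum))  # list of cell values for this row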

github coolx28 / penetration-testing-tools-cheat-sheet / post_win / windows-exploit-suggester.py
dbfile.close()

    # xls or xlsx
    elif 'xls' in extension:

      ALERT("database file detected as xls or xlsx based on extension", ALERT.NORMAL)

      try:
        import xlrd
      except ImportError as e:
        ALERT("please install and upgrade the python-xlrd library", ALERT.BAD)
        exit(1)

      # open the xls file
      try:
        wb = xlrd.open_workbook(ARGS.database)
      except IOError as e:
        ALERT("no such file or directory '%s'. ensure you have the correct database file passed in --database/-d" % ARGS.database, ALERT.BAD)
        exit(1)
      sh = wb.sheet_by_name('Bulletin Search')

      # read the spreadsheet into a temp file
      f = NamedTemporaryFile(mode='wb')
      wr = csv.writer(f, quoting=csv.QUOTE_NONE, delimiter=',')

      data = ''

      # loop through xls
      for rownum in xrange(sh.nrows):

        values = sh.row_values(rownum)
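The excerpt above targets Python 2 (xrange, a csv.writer over a binary NamedTemporaryFile). A minimal Python 3 sketch of the same xls-to-CSV conversion, with the input and output paths as placeholders:

import csv
import xlrd

wb = xlrd.open_workbook("bulletin.xls")      # placeholder path
sh = wb.sheet_by_name("Bulletin Search")     # sheet name from the excerpt
with open("bulletin.csv", "w", newline="") as f:
    wr = csv.writer(f)
    for rownum in range(sh.nrows):
        wr.writerow(sh.row_values(rownum))   # one CSV row per sheet row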
github ExtensiveAutomation / extensiveautomation-server / test-library / TestPropertiesLib.py
def decodeExcel(data, worksheet, row=None, column=None):
    """
    Decode excel file
    """
    content = None
    
    # old file format
    try:
        wb = xlrd.open_workbook(file_contents=data)
    except Exception as e:
        raise TestPropertiesException('ERR_PRO_023: excel data not supported')

    # read sheet
    try:
        ws = wb.sheet_by_name(worksheet)
    except Exception as e:
        wb.release_resources()
        raise TestPropertiesException('ERR_PRO_024: worksheet %s does not exist on excel file' % worksheet)
    
    # read cell 
    if row is not None and column is not None:
        col = ws.col_values( int(column) )
        content = col[ int(row) ]
        
    # read column
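The detail to note here is open_workbook(file_contents=...), which parses the raw bytes of a workbook instead of reading from a path. A minimal sketch, assuming data holds bytes obtained from disk or a network response:

import xlrd

with open("report.xls", "rb") as fh:         # placeholder source of bytes
    data = fh.read()

wb = xlrd.open_workbook(file_contents=data)
ws = wb.sheet_by_name("Sheet1")              # placeholder sheet name
content = ws.col_values(0)[0]                # first cell of the first column
wb.release_resources()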
github shawnbrown / datatest / datatest / __past__ / api07_sources.py
def __init__(self, path, worksheet=None, in_memory=False):
        """Initialize self."""
        try:
            import xlrd
        except ImportError:
            raise ImportError(
                "No module named 'xlrd'\n"
                "\n"
                "This is an optional data source that requires the "
                "third-party library 'xlrd'."
            )

        self._file_repr = repr(path)

        # Open Excel file and get worksheet.
        book = xlrd.open_workbook(path, on_demand=True)
        if worksheet:
            sheet = book.sheet_by_name(worksheet)
        else:
            sheet = book.sheet_by_index(0)

        # Build SQLite table from records, release resources.
        iterrows = (sheet.row(i) for i in range(sheet.nrows))
        iterrows = ([x.value for x in row] for row in iterrows)
        columns = next(iterrows)  # <- Get header row.
        connection, table = _load_temp_sqlite_table(columns, iterrows)
        book.release_resources()

        # Calling super() with older convention to support Python 2.7 & 2.6.
        super(ExcelSource, self).__init__(connection, table)
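Note the on_demand=True flag, which makes xlrd load sheets lazily, and the release_resources() call once the rows have been copied out. A condensed sketch of the same pattern with a placeholder path:

import xlrd

book = xlrd.open_workbook("data.xls", on_demand=True)  # load sheets lazily
sheet = book.sheet_by_index(0)
rows = [[cell.value for cell in sheet.row(i)] for i in range(sheet.nrows)]
header, records = rows[0], rows[1:]          # first row is the header
book.release_resources()                     # free memory after copying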
github DangKaio / python-unittest-requests / case_excel / read_excel.py
def __init__(self, excelPath, sheetName):
        self.data = xlrd.open_workbook(excelPath)
        self.table = self.data.sheet_by_name(sheetName)
        # use the first row as dict keys
        self.keys = self.table.row_values(0)
        # total number of rows
        self.rowNum = self.table.nrows
        # total number of columns
        self.colNum = self.table.ncols
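A common next step for this class is zipping the header row with each data row to get one dict per record; a sketch under the same layout assumption (first row holds the keys):

import xlrd

class ExcelReader:
    def __init__(self, excel_path, sheet_name):
        self.data = xlrd.open_workbook(excel_path)
        self.table = self.data.sheet_by_name(sheet_name)
        self.keys = self.table.row_values(0)   # first row as dict keys

    def rows_as_dicts(self):
        for rownum in range(1, self.table.nrows):
            yield dict(zip(self.keys, self.table.row_values(rownum)))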
github NJ-zero / request-api / common / getexceldata.py
def get_nrows(name):
    '''
    Read the number of rows in a sheet.
    :param name: sheet name
    :return: the number of rows
    '''
    alldata = xlrd.open_workbook(file)
    sheet = alldata.sheet_by_name(name)
    nrows = sheet.nrows
    return nrows
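Here file is a module-level path defined elsewhere in the project. A self-contained variant would take the path as a parameter:

import xlrd

def get_nrows(path, name):
    '''Return the number of rows in the named sheet.'''
    sheet = xlrd.open_workbook(path).sheet_by_name(name)
    return sheet.nrows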
github sahana / eden / modules / templates / IRS / ebola.cases.update.py
"District Nord": "Ourossogui", # specific to the 1 current case ;)
                  }
# @ToDo:
# - Allow filtering to just a single country
# - Allow filtering to a range of dates (priority: since xxx to get a diff. NB This is just to save time, since the deduplicator should prevent dupes)
# - Make script more widely usable?: other resources (e.g. Hospitals)

import string
import urllib2
import xlrd

# Open the file from the remote server
# @todo write the remote file to a temp file and then pass to load_workbook
print "Downloading data..."
u = urllib2.urlopen(SOURCE_URL)
wb = xlrd.open_workbook(file_contents=u.read())
ws = wb.sheet_by_name(SOURCE_SHEET)

# Load models
table = s3db.gis_location
otable = s3db.org_organisation

rejected_loc = {}
new_org = {}

# Utility functions
def lookup_loc(location, country):
    """
        Location Names need to match what we have already
    """
    corrected = location_names.get(location)
    if corrected:
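This script is written for Python 2 (urllib2 and print statements). On Python 3 the same download-then-parse pattern can be sketched with urllib.request; the URL and sheet name below are placeholders:

import urllib.request
import xlrd

SOURCE_URL = "https://example.org/cases.xls"     # placeholder URL
with urllib.request.urlopen(SOURCE_URL) as u:
    wb = xlrd.open_workbook(file_contents=u.read())
ws = wb.sheet_by_name("Sheet1")                  # placeholder sheet name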
github alephdata / aleph / services / ingest-file / ingestors / tabular / xls.py
def ingest(self, file_path, entity):
        entity.schema = model.get('Workbook')
        self.extract_ole_metadata(file_path, entity)
        try:
            book = xlrd.open_workbook(file_path, formatting_info=False)
        except Exception as err:
            raise ProcessingException('Invalid Excel file: %s' % err) from err

        try:
            for sheet in book.sheets():
                table = self.manager.make_entity('Table', parent=entity)
                table.make_id(entity.id, sheet.name)
                table.set('title', sheet.name)
                self.emit_row_tuples(table, self.generate_csv(sheet))
                if table.has('csvHash'):
                    self.manager.emit_entity(table)
        except XLRDError as err:
            raise ProcessingException('Invalid Excel file: %s' % err) from err
        finally:
            book.release_resources()
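Two details worth copying from this ingestor: every failure mode of open_workbook is re-raised as the application's own exception, and release_resources() runs in a finally block so the file handle is freed even on error. A stripped-down sketch using a generic ValueError in place of Aleph's ProcessingException:

import xlrd
from xlrd import XLRDError

def sheet_names(file_path):
    try:
        book = xlrd.open_workbook(file_path, formatting_info=False)
    except Exception as err:
        raise ValueError("Invalid Excel file: %s" % err) from err
    try:
        return [sheet.name for sheet in book.sheets()]
    except XLRDError as err:
        raise ValueError("Invalid Excel file: %s" % err) from err
    finally:
        book.release_resources()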
github Nexedi / dream / dream / KnowledgeExtraction / PilotCases / DemandPlanning / InputData_DemandPlanning.py
def generateDemandPlanning(input_url, PPOSQuantity=1000, PlannedWeek=1, PPOSToBeDisaggregated='PPOS1', 
                           MinPackagingSize=10, planningHorizon=10):
    """Generate random demand from spreadsheet at input_url.
    """
    # id is given as an integer and minus one
    # ToDo we have to standardize data
#     PPOSToBeDisaggregated='PPOS'+str(PPOSToBeDisaggregated+'1')
    
    # Read data from the exported Excel file from RapidMiner and call the Import_Excel object of the KE tool to import this data in the tool

    demand_data = urllib.urlopen(input_url).read()
    workbook = xlrd.open_workbook(file_contents=demand_data)

    worksheets = workbook.sheet_names()
    worksheet_RapidMiner = worksheets[0] 

    A= Import_Excel()
    Turnovers=A.Input_data(worksheet_RapidMiner, workbook) #Dictionary with the data from the Excel file

    #Create lists with the MAs' names and the Turnovers for the first twelve weeks of 2010 retrieving this data from the dictionary 
    PPOS=Turnovers.get('Ppos',[])
    SP=Turnovers.get('SP',[])
    MA=Turnovers.get('FP Material No PGS+',[])
    GlobalDemand=Turnovers.get('Global demand',[])

    #Call the Distributions object and fit the data from the list in Normal distribution, so as to have info on Global demand (mean and standard deviation)
    D=Distributions()
    E=HandleMissingValues()
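The sheet_names() call returns the workbook's sheet names in order, which lets the code above grab the first worksheet without knowing its name. A minimal sketch with a placeholder path:

import xlrd

workbook = xlrd.open_workbook("demand.xls")   # placeholder path
first = workbook.sheet_names()[0]             # names in workbook order
worksheet = workbook.sheet_by_name(first)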
github bl166 / USPowerPlantDataset / P1DATAPREP_cropPowerPlants.py
def download_ppt_pic(id_start,id_end,*args, **kwargs):
	# set default options
	collection = kwargs.get('collection', 'naip')
	order = kwargs.get('order', 'normal')
	# open the document with coordinates of US power plants
	egrid = open_workbook("egrid2014_data_v2_PLNT14.xlsx").sheet_by_index(0) # if your data is on sheet 1

	# DEFINE YOUR IMAGE COLLECTION HERE
	#************** NAIP imagery **************
	if collection=='naip':
		collection_naip = ee.ImageCollection('USDA/NAIP/DOQQ').filter(ee.Filter.date('2012-01-01', '2014-12-31'))
		# reduce the image stack to one image
		image = collection_naip.mosaic()
		# resolution = 1m
		res = 1

	#********** Pan-sharpened Landsat **********
	elif collection=='ls8':
		# collection_ls8 = ee.ImageCollection('LANDSAT/LC8_L1T_TOA').filterDate('2014-01-01', '2014-12-30')
		# # reduce the image stack to every pixel's median value
		# img_red = collection_ls8.reduce(ee.Reducer.median())
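One caveat: this opens an .xlsx file, which only works with xlrd 1.x; xlrd 2.0 removed support for every format except legacy .xls. On current versions the equivalent read would go through another library such as openpyxl; a sketch:

from openpyxl import load_workbook

# xlrd >= 2.0 no longer reads .xlsx; openpyxl is the usual replacement
egrid = load_workbook("egrid2014_data_v2_PLNT14.xlsx").worksheets[0]
first_row = [cell.value for cell in egrid[1]]  # openpyxl rows are 1-indexed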
github pandas-dev / pandas / pandas / io / excel / _xlrd.py
def load_workbook(self, filepath_or_buffer):
        from xlrd import open_workbook

        if hasattr(filepath_or_buffer, "read"):
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data)
        else:
            return open_workbook(filepath_or_buffer)
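pandas dispatches on whether the argument has a read method, so a readable buffer goes through file_contents while anything else is treated as a path. A usage sketch of both forms with a placeholder file:

import xlrd

# path form
wb = xlrd.open_workbook("data.xls")

# buffer form: read the bytes yourself, then hand them to file_contents
with open("data.xls", "rb") as fh:
    wb = xlrd.open_workbook(file_contents=fh.read())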