if self.result_type == 'resultSet':
    if self.lower_ident:
        self.col_names = [c['name'].lower() for c in res['resultSet']['columns']]
    else:
        self.col_names = [c['name'] for c in res['resultSet']['columns']]

    self.col_types = [c['dataType'] for c in res['resultSet']['columns']]

    self.num_columns = res['resultSet']['numColumns']
    self.num_rows_total = res['resultSet']['numRows']
    self.num_rows_chunk = res['resultSet']['numRowsInMessage']

    self._check_duplicate_col_names()
elif self.result_type == 'rowCount':
    self.row_count = res['rowCount']
else:
    raise ExaRuntimeError(self.connection, f'Unknown resultType: {self.result_type}')
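
# Illustrative sketch (not from the original source): the approximate shape of the 'res'
# dict consumed above, reconstructed only from the keys this code references.
# All values below are placeholders, not real server output.
#
#     res = {
#         'resultType': 'resultSet',
#         'resultSet': {
#             'numColumns': 2,
#             'numRows': 1000,
#             'numRowsInMessage': 1000,
#             'columns': [
#                 {'name': 'USER_ID', 'dataType': {'type': 'DECIMAL'}},
#                 {'name': 'USER_NAME', 'dataType': {'type': 'VARCHAR'}},
#             ],
#         },
#     }
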
def _check_duplicate_col_names(self):
    """
    Exasol allows duplicate column names in result sets, but this leads to various problems
    with dictionary-based access. PyEXASOL adds an extra check to prevent such problems and
    to keep .columns() and fetch_dict=True safe.
    """
    duplicate_col_names = [k for (k, v) in collections.Counter(self.col_names).items() if v > 1]

    if duplicate_col_names:
        raise ExaRuntimeError(self.connection, f'Duplicate column names in result set: {", ".join(duplicate_col_names)}')
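
# Minimal illustration (not from the original source) of the duplicate check above:
# a query such as SELECT 1 AS id, 2 AS id yields col_names == ['id', 'id'], and
# collections.Counter flags every name that occurs more than once.
#
#     >>> [k for k, v in collections.Counter(['id', 'id', 'name']).items() if v > 1]
#     ['id']
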
self.pos_chunk = 0

self.result_type = None
self.result_set_handle = None
self.statement_handle = None
self.parameter_data = None

# This index may not match STMT_ID in system tables due to automatically executed queries (e.g. autocommit)
self.connection.stmt_count += 1
self.stmt_idx = self.connection.stmt_count

self.execution_time = 0
self.is_closed = False

if self.connection.is_closed:
    raise ExaRuntimeError(self.connection, "Exasol connection was closed")

if prepare:
    self._prepare()
else:
    self._execute()
def add_default_handler(self):
    if self.connection.options['debug']:
        if self.connection.options['debug_logdir']:
            logdir = pathlib.Path(self.connection.options['debug_logdir'])

            if not logdir.is_dir():
                raise ExaRuntimeError(self.connection, 'Not a directory: ' + str(logdir))

            handler = logging.FileHandler(logdir / self._get_log_filename(), encoding='utf-8')
        else:
            handler = logging.StreamHandler()
    else:
        handler = logging.NullHandler()

    formatter = logging.Formatter('%(asctime)s %(message)s')
    formatter.default_msec_format = '%s.%03d'

    handler.setFormatter(formatter)
    self.addHandler(handler)
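
# Usage sketch (assumption, not from the original source): the handler chosen above is
# driven by the connection options, roughly:
#
#     pyexasol.connect(..., debug=True, debug_logdir='/var/log/pyexasol')  # -> FileHandler in that directory
#     pyexasol.connect(..., debug=True)                                    # -> StreamHandler (stderr)
#     pyexasol.connect(...)                                                # -> NullHandler (logging disabled)
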
"""
INSERT small number of rows into table using prepared statement
It provides better performance for small data sets of 10,000 rows or less compared to .import_from_iterable()
Please use .import_from_iterable() for larger data sets and better memory efficiency
Please use .import_from_pandas() to import from data frame regardless of its size
You may use "columns" argument to specify custom order of columns for insertion
If some columns are not included in this list, NULL or DEFAULT value will be used instead
"""
# Convert possible iterator into list
data = list(data)
if len(data) == 0:
raise ExaRuntimeError(self.connection, "At least one row of data is required for insert_multi()")
params = {
'table_name': self.connection.format.default_format_ident(table_name),
'columns': '',
'values': ', '.join(['?'] * len(data[0]))
}
if columns:
params['columns'] = f"({','.join([self.connection.format.default_format_ident(c) for c in columns])})"
query = "INSERT INTO {table_name!r}{columns!r} VALUES ({values!r})"
stmt = self.connection.cls_statement(self.connection, query, params, prepare=True)
stmt.execute_prepared(data)
stmt.close()
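
# Usage sketch (assumption, not from the original source): the table, column names and
# connection object 'C' below are hypothetical.
#
#     C.ext.insert_multi('users',
#                        [(1, 'Alice'), (2, 'Bob')],
#                        columns=['user_id', 'user_name'])
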
def __next__(self):
    if self.pos_total >= self.num_rows_total:
        if self.result_type != 'resultSet':
            raise ExaRuntimeError(self.connection, 'Attempt to fetch from statement without result set')

        raise StopIteration

    if self.pos_chunk >= self.num_rows_chunk:
        self._next_chunk()

    row = next(self.data_zip)

    if self.fetch_mapper:
        row = tuple(map(self.fetch_mapper, row, self.col_types))

    if self.fetch_dict:
        row = dict(zip(self.col_names, row))

    self.pos_total += 1
    self.pos_chunk += 1

    return row
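
# Usage sketch (assumption, not from the original source): __next__ above is what drives
# plain iteration over a statement; connection 'C' and the query are hypothetical.
#
#     stmt = C.execute('SELECT user_id, user_name FROM users')
#     for row in stmt:    # each step calls __next__ and may fetch the next chunk
#         print(row)      # tuple by default, dict when fetch_dict=True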