# NOTE: scraper artifact (vendor advertisement) neutralized as a comment so it cannot break parsing.
# hadoop: route a DROP TABLE request to the EMR/Hive backend.
# NOTE(review): indentation was flattened in the source (SyntaxError); structure restored here.
if destination_system_technology == DataSystem.SystemTechnology.HIVE:
    if abstract_table.storage_type == DataSystem.StorageType.S3:
        # Lazy import so non-Hadoop code paths do not pay for the EMR dependency.
        from m3d.hadoop.emr.emr_system import EMRSystem
        emr_system = EMRSystem(
            config,
            destination_system,
            destination_database,
            destination_environment,
            emr_cluster_id
        )
        # Tag the cluster with the invoking API method name for traceability.
        emr_system.add_cluster_tag(EMRSystem.EMRClusterTag.API_METHOD, M3D.drop_table.__name__)
        emr_system.drop_table(destination_table)
    else:
        # Only S3-backed storage is supported for Hive destinations.
        raise m3d_exceptions.M3DUnsupportedStorageException(abstract_table.storage_type)
else:
    # Only Hive destination technology is supported by this code path.
    raise m3d_exceptions.M3DUnsupportedDestinationSystemException(destination_system_technology)
# hadoop: route a table-load request to the Hadoop/EMR load executor.
# NOTE(review): indentation was flattened in the source (SyntaxError); structure restored here.
if ds.database_type == DataSystem.DatabaseType.EMR:
    if ds.storage_type == DataSystem.StorageType.S3:
        # Lazy import keeps the Hadoop dependency out of unrelated code paths.
        from m3d.hadoop.load.load_executor_hadoop import LoadExecutorHadoop
        # Build the executor via its factory and run the load end-to-end.
        LoadExecutorHadoop.create(
            config_path=config,
            destination_system=destination_system,
            destination_database=destination_database,
            destination_environment=destination_environment,
            destination_table=destination_table,
            load_type=load_type,
            emr_cluster_id=emr_cluster_id,
            spark_params_str=spark_params
        ).run()
    else:
        # Only S3-backed storage is supported for EMR loads.
        raise m3d_exceptions.M3DUnsupportedStorageException(ds.storage_type)
else:
    # Only the EMR database type is supported by this code path.
    raise m3d_exceptions.M3DUnsupportedDatabaseTypeException(ds.database_type)
# hadoop: route a CREATE TABLE request to the EMR/Hive backend.
# NOTE(review): indentation was flattened in the source (SyntaxError); structure restored here.
if destination_system_technology == DataSystem.SystemTechnology.HIVE:
    if abstract_table.storage_type == DataSystem.StorageType.S3:
        # Lazy import so non-Hadoop code paths do not pay for the EMR dependency.
        from m3d.hadoop.emr.emr_system import EMRSystem
        emr_system = EMRSystem(
            config,
            destination_system,
            destination_database,
            destination_environment,
            emr_cluster_id
        )
        # Tag the cluster with the invoking API method name for traceability.
        emr_system.add_cluster_tag(EMRSystem.EMRClusterTag.API_METHOD, M3D.create_table.__name__)
        emr_system.create_table(destination_table)
    else:
        # Only S3-backed storage is supported for Hive destinations.
        raise m3d_exceptions.M3DUnsupportedStorageException(abstract_table.storage_type)
else:
    # Only Hive destination technology is supported by this code path.
    raise m3d_exceptions.M3DUnsupportedDestinationSystemException(destination_system_technology)
# hadoop: route a drop-lake-out-view request to the EMR/Hive backend.
# NOTE(review): indentation was flattened in the source (SyntaxError); structure restored here.
if destination_system_technology == DataSystem.SystemTechnology.HIVE:
    if abstract_table.storage_type == DataSystem.StorageType.S3:
        # Lazy import so non-Hadoop code paths do not pay for the EMR dependency.
        from m3d.hadoop.emr.emr_system import EMRSystem
        emr_system = EMRSystem(
            config,
            destination_system,
            destination_database,
            destination_environment,
            emr_cluster_id
        )
        # Tag the cluster with the invoking API method name for traceability.
        emr_system.add_cluster_tag(EMRSystem.EMRClusterTag.API_METHOD, M3D.drop_lake_out_view.__name__)
        emr_system.drop_lake_out_view(destination_table)
    else:
        # Only S3-backed storage is supported for Hive destinations.
        raise m3d_exceptions.M3DUnsupportedStorageException(abstract_table.storage_type)
else:
    # Only Hive destination technology is supported by this code path.
    raise m3d_exceptions.M3DUnsupportedDestinationSystemException(destination_system_technology)
# hadoop: route a create-lake-out-view request to the EMR/Hive backend.
# NOTE(review): indentation was flattened in the source (SyntaxError); structure restored here.
if destination_system_technology == DataSystem.SystemTechnology.HIVE:
    if abstract_table.storage_type == DataSystem.StorageType.S3:
        # Lazy import so non-Hadoop code paths do not pay for the EMR dependency.
        from m3d.hadoop.emr.emr_system import EMRSystem
        emr_system = EMRSystem(
            config,
            destination_system,
            destination_database,
            destination_environment,
            emr_cluster_id
        )
        # Tag the cluster with the invoking API method name for traceability.
        emr_system.add_cluster_tag(EMRSystem.EMRClusterTag.API_METHOD, M3D.create_lake_out_view.__name__)
        emr_system.create_lake_out_view(destination_table)
    else:
        # Only S3-backed storage is supported for Hive destinations.
        raise m3d_exceptions.M3DUnsupportedStorageException(abstract_table.storage_type)
else:
    # Only Hive destination technology is supported by this code path.
    raise m3d_exceptions.M3DUnsupportedDestinationSystemException(destination_system_technology)
# Resolve the destination technology, then route a TRUNCATE TABLE request to the EMR/Hive backend.
# NOTE(review): indentation was flattened in the source (SyntaxError); structure restored here.
destination_system_technology = abstract_table.get_destination_technology()
if destination_system_technology == DataSystem.SystemTechnology.HIVE:
    if abstract_table.storage_type == DataSystem.StorageType.S3:
        # Lazy import so non-Hadoop code paths do not pay for the EMR dependency.
        from m3d.hadoop.emr.emr_system import EMRSystem
        emr_system = EMRSystem(
            config,
            destination_system,
            destination_database,
            destination_environment,
            emr_cluster_id
        )
        # Tag the cluster with the invoking API method name for traceability.
        emr_system.add_cluster_tag(EMRSystem.EMRClusterTag.API_METHOD, M3D.truncate_table.__name__)
        emr_system.truncate_table(destination_table)
    else:
        # Only S3-backed storage is supported for Hive destinations.
        raise m3d_exceptions.M3DUnsupportedStorageException(abstract_table.storage_type)
else:
    # Only Hive destination technology is supported by this code path.
    raise m3d_exceptions.M3DUnsupportedDestinationSystemException(destination_system_technology)