// rule names may only contain word characters (letters, numbers, underscores)
const re = /[^\w]/;
if (re.test(item.name)) {
throw new Error('Names may only contain letters, numbers, and underscores.');
}
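// e.g. 'discover_granules_1' passes, while 'discover-granules' or 'discover granules'
// would throw, since /[^\w]/ matches any character outside [A-Za-z0-9_]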
// Initialize new rule object
let newRuleItem = cloneDeep(item);
// the default state is 'ENABLED'
if (!item.state) {
newRuleItem.state = 'ENABLED';
}
const payload = await Rule.buildPayload(newRuleItem);
switch (newRuleItem.rule.type) {
case 'onetime': {
await invoke(process.env.invoke, payload);
break;
}
case 'scheduled': {
await this.addRule(newRuleItem, payload);
break;
}
case 'kinesis': {
const ruleArns = await this.addKinesisEventSources(newRuleItem);
newRuleItem = this.updateKinesisRuleArns(newRuleItem, ruleArns);
break;
}
case 'sns': {
if (newRuleItem.state === 'ENABLED') {
const snsSubscriptionArn = await this.addSnsTrigger(newRuleItem);
newRuleItem = this.updateSnsRuleArn(newRuleItem, snsSubscriptionArn);
}
break;
}
}
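For context, a minimal sketch of the kind of rule item this create flow expects; the field values below are made-up examples rather than anything from the source:

const exampleRuleItem = {
  name: 'daily_discover_granules', // only word characters, or the name check above throws
  state: 'ENABLED',                // optional; an omitted state defaults to 'ENABLED'
  rule: { type: 'scheduled' }      // 'onetime' | 'scheduled' | 'kinesis' | 'sns', per the switch above
};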
const duplicateHandling = duplicateHandlingType(event);
// use stack and collection names to suffix fileStagingDir
const fileStagingDir = path.join(
(config.fileStagingDir || 'file-staging'),
stack
);
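// e.g. with config.fileStagingDir unset and a stack named 'my-stack',
// path.join('file-staging', 'my-stack') yields 'file-staging/my-stack'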
if (!provider) {
const err = new errors.ProviderNotFound('Provider info not provided');
log.error(err);
return Promise.reject(err);
}
const IngestClass = granuleSelector('ingest', provider.protocol);
const ingest = new IngestClass(
buckets,
collection,
provider,
fileStagingDir,
forceDownload,
duplicateHandling
);
return download(ingest, downloadBucket, provider, input.granules)
.then((granules) => {
if (ingest.end) ingest.end();
const output = { granules };
if (collection && collection.process) output.process = collection.process;
if (config.pdr) output.pdr = config.pdr;
log.debug(`SyncGranule Complete. Returning output: ${JSON.stringify(output)}`);
return output;
});
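A rough sketch of how this task could be invoked and what the resolved output looks like, based on the fields read in syncGranule below; the concrete values are placeholders:

exports.syncGranule({
  config: {
    stack: 'my-stack',                        // placeholder stack name
    buckets: { /* bucket configuration */ },
    provider: { protocol: 'http' },           // placeholder provider; protocol selects the ingest class
    collection: { name: 'EXAMPLE', process: 'example-process' },
    downloadBucket: 'my-download-bucket'
  },
  input: { granules: [ /* granules discovered upstream */ ] }
}).then((output) => {
  // output.granules holds the synced granules; output.process and output.pdr
  // appear only when collection.process or config.pdr were provided
});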
// destination bucket selection (protected / public / private)
bucket = process.env.protected;
break;
case 'public':
bucket = process.env.public;
isPublic = true;
break;
default:
bucket = process.env.private;
break;
}
const p = url.parse(file.stagingFile);
const filename = path.basename(p.path);
await S3.copy(path.join(p.host, p.path), bucket, filename, isPublic);
log.info(`${filename} copied`, logDetails);
// delete the file from staging
const deleteInfo = S3.parseS3Uri(file.stagingFile);
await S3.delete(deleteInfo.Bucket, deleteInfo.Key);
log.info(`${file.stagingFile} deleted`, logDetails);
file.archivedFile = `s3://${bucket}/${filename}`;
file.name = filename;
}
newFiles[key] = file;
}
return newFiles;
}
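The S3.copy / S3.delete calls above are the project's own helpers; as a rough standalone sketch, the same copy-then-delete move can be written directly against the AWS SDK (bucket and key names are placeholders):

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// copy the staged object to its destination, then remove the staging copy
async function moveObject(srcBucket, srcKey, destBucket, destKey, isPublic) {
  await s3.copyObject({
    CopySource: `${srcBucket}/${srcKey}`, // URL-encode keys containing special characters
    Bucket: destBucket,
    Key: destKey,
    ACL: isPublic ? 'public-read' : 'private'
  }).promise();
  await s3.deleteObject({ Bucket: srcBucket, Key: srcKey }).promise();
}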
log.info(`Checking ${arn}`);
if (r.status === 'running') {
// check if the execution has passed the five-hour limit
const now = Date.now();
const late = (now - timestamp) > 18000000; // 5 hours = 5 * 60 * 60 * 1000 ms
if (late) {
error = {
Error: 'Stopped By Cumulus',
Cause: 'Execution was stopped by Cumulus because it did not finish in 5 hours.'
};
await StepFunction.stop(
arn,
error.Cause,
error.Error
);
await partialRecordUpdate(esClient, arn, 'execution', { status: 'failed', error });
await updateGranulesAndPdrs(esClient, url, error);
}
} else {
if (output.error) {
input.exception = output.error;
input.meta.status = 'failed';
await handlePayload(output);
return;
}
}
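For readability, the five-hour cutoff could also be expressed with a named constant; a small sketch equivalent to the check above (the helper name is made up):

const FIVE_HOURS_MS = 5 * 60 * 60 * 1000; // 18000000

function isPastExecutionLimit(startTimestamp, limitMs = FIVE_HOURS_MS) {
  return (Date.now() - startTimestamp) > limitMs;
}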
exports.syncGranule = function syncGranule(event) {
const config = event.config;
const input = event.input;
const stack = config.stack;
const buckets = config.buckets;
const provider = config.provider;
const collection = config.collection;
const forceDownload = config.forceDownload || false;
const downloadBucket = config.downloadBucket;
const duplicateHandling = duplicateHandlingType(event);
// use stack and collection names to suffix fileStagingDir
const fileStagingDir = path.join(
(config.fileStagingDir || 'file-staging'),
stack
);
if (!provider) {
const err = new errors.ProviderNotFound('Provider info not provided');
log.error(err);
return Promise.reject(err);
}
const IngestClass = granuleSelector('ingest', provider.protocol);
const ingest = new IngestClass(
buckets,
collection,
provider,
fileStagingDir,
forceDownload,
duplicateHandling
);
async function moveGranules(event) {
// we have to post the meta-xml file of all output granules
// first we check if there is an output file
const config = event.config;
const bucketsConfig = new BucketsConfig(config.buckets);
const moveStagedFiles = get(config, 'moveStagedFiles', true);
const cmrGranuleUrlType = get(config, 'cmrGranuleUrlType', 'distribution');
const duplicateHandling = duplicateHandlingType(event);
const granulesInput = event.input.granules;
const cmrFiles = granulesToCmrFileObjects(granulesInput);
const granulesByGranuleId = keyBy(granulesInput, 'granuleId');
let movedGranules;
if (cmrGranuleUrlType === 'distribution' && !config.distribution_endpoint) {
throw new Error('cmrGranuleUrlType is distribution, but no distribution endpoint is configured.');
}
// allows us to disable moving the files
if (moveStagedFiles) {
// update the granules with aspirational metadata (where each file should end up after moving)
const granulesToMove = await updateGranuleMetadata(
granulesByGranuleId, config.collection, cmrFiles, bucketsConfig
);
const s3ObjAlreadyExists = await s3ObjectExists(target);
log.debug(`file ${target.Key} exists in ${target.Bucket}: ${s3ObjAlreadyExists}`);
const options = (bucketsConfig.type(file.bucket).match('public')) ? { ACL: 'public-read' } : null;
let versionedFiles = [];
if (s3ObjAlreadyExists) {
if (markDuplicates) fileMoved.duplicate_found = true;
// returns renamed files for 'version', otherwise empty array
versionedFiles = await handleDuplicateFile({
source,
target,
copyOptions: options,
duplicateHandling
});
} else {
await moveGranuleFile(source, target, options);
}
// return both file moved and renamed files
return [fileMoved]
.concat(versionedFiles.map((f) => ({
bucket: f.Bucket,
name: path.basename(f.Key),
filename: buildS3Uri(f.Bucket, f.Key),
filepath: f.Key,
size: f.size,
url_path: file.url_path
})));
}
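The duplicate check above relies on s3ObjectExists; a rough sketch of how such an existence check can be implemented with the AWS SDK (an illustration, not the project's own helper):

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// resolves true if { Bucket, Key } points at an existing object
async function objectExists({ Bucket, Key }) {
  try {
    await s3.headObject({ Bucket, Key }).promise();
    return true;
  } catch (err) {
    if (err.code === 'NotFound' || err.statusCode === 404) return false;
    throw err; // propagate unexpected errors (permissions, throttling, etc.)
  }
}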
generateDocFromPayload(payload) {
const name = get(payload, 'cumulus_meta.execution_name');
const arn = aws.getExecutionArn(
get(payload, 'cumulus_meta.state_machine'),
name
);
if (!arn) {
throw new Error('State Machine Arn is missing. Must be included in the cumulus_meta');
}
const execution = aws.getExecutionUrl(arn);
const collectionId = constructCollectionId(
get(payload, 'meta.collection.name'), get(payload, 'meta.collection.version')
);
const doc = {
name,
arn,
parentArn: get(payload, 'cumulus_meta.parentExecutionArn'),
execution,
tasks: get(payload, 'meta.workflow_tasks'),
error: parseException(payload.exception),
type: get(payload, 'meta.workflow_name'),
collectionId,
status: get(payload, 'meta.status', 'unknown'),
createdAt: get(payload, 'cumulus_meta.workflow_start_time'),
timestamp: Date.now()