Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
/**
 * Builds the final class-name list for a background-image component,
 * appending a unique, hashed class when another component already uses the
 * same `className` (so per-instance CSS can be targeted without collisions).
 *
 * @param {Object} props - Component props; `className` is extracted, the rest
 *   are converted and used to look up the current image source data.
 * @return {string[]} Single-element array holding the combined class names.
 */
export const fixClassName = ({ className, ...props }) => {
  const convertedProps = convertProps(props)
  const elementExists = inComponentClassCache(className)
  // Extract imageData for the currently active source.
  const imageData = getCurrentSrcData(convertedProps)
  // Unique suffix so multiple instances of the same class don't collide.
  const additionalClassname = uuid.generate()
  // Create a "uniquely hashed" class from the srcSet (or the className as a
  // fallback) plus the random suffix.
  const randomClass = ` gbi-${hashString(
    (imageData && imageData.srcSet) || className
  )}-${additionalClassname}`
  // Should an element already exist in the cache, add the randomized class.
  const additionalClass = elementExists ? randomClass : ``
  // `additionalClass` is always a string here, so no extra fallback is needed.
  const componentClassNames = `${className || ``}${additionalClass}`.trim()
  // Add it to the cache if it doesn't exist yet.
  if (!elementExists) activateCacheForComponentClass(className)
  return [componentClassNames]
}
async storeShardCheckpoint(shardId, checkpoint, shardsPath, shardsPathNames) {
if (typeof checkpoint !== 'string') throw new TypeError('The sequence number is required.');
const { client, consumerGroup, streamName } = internal(this);
await client.update({
ExpressionAttributeNames: {
...shardsPathNames,
'#b': shardId,
'#c': 'checkpoint',
'#d': 'version'
},
ExpressionAttributeValues: {
':x': checkpoint,
':y': generate()
},
Key: { consumerGroup, streamName },
UpdateExpression: `SET ${shardsPath}.#b.#c = :x, ${shardsPath}.#b.#d = :y`
});
}
}
// NOTE(review): fragment — the enclosing function's header (and the origin of
// `Item`, `streamCreatedOn`, `client`, `Key`, `logger`, `consumerGroup`,
// `streamName`, `generate`) lies outside this view; presumably this is the
// stream-state initialization routine — confirm against the full file.
// If a state record exists but carries a different creation timestamp, the
// stream was recreated: drop the stale record so a fresh one is written below.
if (Item && Item.streamCreatedOn !== streamCreatedOn) {
  await client.delete({ Key });
  logger.warn('Stream state has been reset. Non-matching stream creation timestamp.');
}
try {
  // Write the initial, empty state record. The condition expression makes
  // this a no-op when another worker already initialized the stream state.
  await client.put({
    ConditionExpression: 'attribute_not_exists(streamName)',
    Item: {
      consumerGroup,
      consumers: {},
      enhancedConsumers: {},
      shards: {},
      streamCreatedOn,
      streamName,
      version: generate()
    }
  });
  logger.debug('Initial state has been recorded for the stream.');
} catch (err) {
  // A conditional-check failure means the record already exists — ignore it;
  // anything else is unexpected and is logged and rethrown.
  if (err.code !== 'ConditionalCheckFailedException') {
    logger.error(err);
    throw err;
  }
}
}
async registerEnhancedConsumer(name, arn) {
const { client, consumerGroup, logger, streamName, useAutoShardAssignment } = internal(this);
try {
await client.update({
ConditionExpression: 'attribute_not_exists(#a.#b)',
ExpressionAttributeNames: {
'#a': 'enhancedConsumers',
'#b': name
},
ExpressionAttributeValues: {
':x': {
arn,
isStandalone: !useAutoShardAssignment,
isUsedBy: null,
version: generate(),
...(!useAutoShardAssignment && { shards: {} })
}
},
Key: { consumerGroup, streamName },
UpdateExpression: 'SET #a.#b = :x'
});
logger.debug(`The enhanced consumer "${name}" is now registered.`);
} catch (err) {
if (err.code !== 'ConditionalCheckFailedException') {
logger.error(err);
throw err;
}
}
}
// NOTE(review): fragment — the enclosing method's signature (and the origin of
// `streamState`, `privateProps`, `shardData`, `shardId`, `generate`) is
// outside this view; looks like shard registration in the state store —
// confirm against the full file.
const { shardsPath, shardsPathNames } = await this.getShardsData(streamState);
const { client, consumerGroup, logger, streamName } = privateProps;
const { parent } = shardData;
try {
  // Conditionally create the shard's state entry; the condition makes this a
  // no-op when the shard was already registered by another worker.
  await client.update({
    ConditionExpression: `attribute_not_exists(${shardsPath}.#b)`,
    ExpressionAttributeNames: { ...shardsPathNames, '#b': shardId },
    ExpressionAttributeValues: {
      ':x': {
        checkpoint: null,
        depleted: false,
        leaseExpiration: null,
        leaseOwner: null,
        parent,
        version: generate()
      }
    },
    Key: { consumerGroup, streamName },
    UpdateExpression: `SET ${shardsPath}.#b = :x`
  });
} catch (err) {
  // Shard already present → ignore; unexpected errors are logged and rethrown.
  if (err.code !== 'ConditionalCheckFailedException') {
    logger.error(err);
    throw err;
  }
}
}
// Register each new websocket session under a freshly generated id, send it
// a connection acknowledgement, and clean the registry up again on close.
ws.on('connection', socket => {
  sendMessageToSession(socket, MESSAGE_TYPES.CONNECTION, { status: 'ok' })
  const id = uuid.generate()
  sessions[id] = socket
  socket.on('close', () => {
    delete sessions[id]
    socket = null // release the closure's reference to the socket
  })
})
}
const createshortUuid = () => shortUuid.generate();
// NOTE(review): fragment cut mid-expression at both ends — these lines are the
// tail of one options object (presumably a records-encoder factory call) and
// the start of the private-state initialization; confirm against the full file.
compression,
outputEncoding: 'Buffer',
streamName
});
// Stash all normalized options and collaborators on the private state object.
Object.assign(internal(this), {
awsOptions,
client: new KinesisClient({
awsOptions,
logger: normLogger,
streamName,
supressThroughputWarnings
}),
compression,
consumerGroup,
consumerId: generate(),
createStreamIfNeeded,
dynamoDb,
encryption,
getStatsIntervalId: null,
// Clamp the record limit into (0, 10000]; anything else falls back to 10000.
limit: limitNumber > 0 && limitNumber <= 10000 ? limitNumber : 10000,
logger: normLogger,
// Clamp enhanced-consumer count into (0, 20]; anything else falls back to 5.
maxEnhancedConsumers:
maxConsumersNumber > 0 && maxConsumersNumber <= 20 ? maxConsumersNumber : 5,
// Poll delays are floored (250ms minimum for the no-records delay).
noRecordsPollDelay: noRecordsPollDelayNumber >= 250 ? noRecordsPollDelayNumber : 250,
pollDelay: pollDelayNumber >= 0 ? pollDelayNumber : 250,
recordsEncoder,
s3: {
largeItemThreshold: largeItemThresholdNumber,
nonS3Keys: [],
...s3,
bucketName: s3BucketName