const Ajv = require('ajv')
const Bottleneck = require('bottleneck')

// Message schema (the head of this object is reconstructed; the excerpt
// began inside it, and the full schema defines additional properties).
const MESSAGE_FORMAT = {
  type: 'object',
  properties: {
    decryption: {
      type: 'object',
      required: ['keys', 'roomId'],
      properties: {
        keys: {
          type: 'array',
          items: { type: 'string' }
        },
        roomId: { type: 'string' }
      }
    }
  }
}

const validator = new Ajv()
const validateMessage = validator.compile(MESSAGE_FORMAT)
const limiter = new Bottleneck({ maxConcurrent: 25 })
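Taken together, the schema gate and the limiter suggest a handler along these lines; a minimal sketch, assuming a hypothetical async `processMessage`:

// Hypothetical handler: reject malformed messages, then process valid
// ones under the limiter's cap of 25 concurrent jobs.
async function handleIncoming(message) {
  if (!validateMessage(message)) {
    console.error('Invalid message:', validator.errorsText(validateMessage.errors))
    return
  }
  return limiter.schedule(() => processMessage(message))
}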
/**
 * Origin Messaging Client
 *
 * To use:
 *
 * ```
 * const messaging = new Messaging(options)
 * await messaging.init(this.address)
 * await messaging.startConversing()
 * // Once ready:
 * await messaging.sendConvMessage(aliceAddress, { content: 'Hi' })
 * // Once someone else's messages have arrived
 * const messages = messaging.getAllMessages(aliceAddress)
 * ```
 */
const throttler_options = {
  minTime: 10, // roughly 100 requests per second
  id:
    'id' +
    crypto
      .createHash('md5')
      .update(this.options.developer_token)
      .digest('hex'), // don't want to leak dev token to redis
  /* Clustering options */
  datastore: this.options.redis_options ? 'redis' : 'local',
  clearDatastore: false,
  clientOptions: this.options.redis_options,
  timeout: 1000 * 60 * 10,
}
this.throttler = new Bottleneck(throttler_options)
this.throttler.on('error', err => {
  console.error('Could not connect to redis:')
  console.error(err)
})
}
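Bottleneck's clustering keys limiter state in Redis by `id`, which is why the code above hashes the developer token rather than writing it into the key. A self-contained sketch with the same option shape (local datastore, hypothetical id and task):

const Bottleneck = require('bottleneck')

// Same options as above, minus the Redis wiring: 10ms spacing and a
// 10-minute job timeout. With datastore: 'redis', every process that
// constructs a limiter with the same id shares one budget.
const throttler = new Bottleneck({
  minTime: 10,
  id: 'example-throttler', // hypothetical id
  datastore: 'local',
  clearDatastore: false,
  timeout: 1000 * 60 * 10,
})

const task = n => Promise.resolve(n * 2) // hypothetical unit of work
throttler.schedule(task, 21).then(console.log) // 42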
function alexaLocal(options) {
  debug("Connecting to Homebridge Smart Home Skill");
  // Throttle events to match Amazon's rate limits:
  // at most one event every 10 seconds, queueing at most 10 of them.
  limiter = new Bottleneck({
    maxConcurrent: 1,
    highWater: 10,
    minTime: 10000,
    strategy: Bottleneck.strategy.BLOCK
  });
  limiter.on("dropped", function(dropped) {
    console.log("WARNING: (homebridge-alexa) Dropped event message, message rate too high.");
  });
  username = options.username;
  connection.client = mqtt.connect(options);
  // connection.client.setMaxListeners(0);
  connection.client.on('connect', function() {
    debug('connect', "command/" + options.username + "/#");
    connection.client.removeAllListeners('message'); // This hangs up everyone on the channel
    connection.client.subscribe("command/" + options.username + "/#");
    connection.client.publish("presence/" + options.username + "/1", JSON.stringify({
      Connected: options.username,
      version: packageConfig.version
    }));
  });
}
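The BLOCK strategy used above is Bottleneck's most aggressive: when a submission arrives while the queue is at highWater, the queued jobs are dropped (firing the 'dropped' handler) and the limiter blocks new jobs for a penalty period. A scaled-down, self-contained sketch:

const Bottleneck = require('bottleneck')

// Queue cap of 2, one job per second; the loop submits 5 jobs, so the
// BLOCK strategy trips and 'dropped' fires for the discarded ones.
const demo = new Bottleneck({
  maxConcurrent: 1,
  highWater: 2,
  minTime: 1000,
  strategy: Bottleneck.strategy.BLOCK,
})
demo.on('dropped', () => console.log('dropped an event'))

for (let i = 0; i < 5; i++) {
  demo.schedule(() => new Promise(resolve => setTimeout(resolve, 100)))
    .then(() => console.log('sent'))
    .catch(() => {}) // swallow rejections from dropped jobs, if any
}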
export const restoreUsers = async (cognito: CognitoISP, UserPoolId: string, file: string, password?: string, passwordModulePath?: string, delayDurationInMillis: number = 0) => {
  if (UserPoolId == 'all') throw Error(`'all' is not an acceptable value for UserPoolId`);
  let pwdModule: any = null;
  if (typeof passwordModulePath === 'string') {
    pwdModule = require(passwordModulePath);
  }
  const { UserPool } = await cognito.describeUserPool({ UserPoolId }).promise();
  const UsernameAttributes = UserPool && UserPool.UsernameAttributes || [];
  // One AdminCreateUser call every 2 seconds, to stay under Cognito's limits.
  const limiter = new Bottleneck({ minTime: 2000 });
  const readStream = fs.createReadStream(file);
  const parser = JSONStream.parse();
  parser.on('data', async (data: any[]) => {
    for (let user of data) {
      // filter out non-mutable attributes
      const attributes = user.Attributes.filter((attr: AttributeType) => attr.Name !== 'sub');
      const params: AdminCreateUserRequest = {
        UserPoolId,
        Username: user.Username,
        UserAttributes: attributes
      };
      // Set Username as email if UsernameAttributes of UserPool contains email
      if (UsernameAttributes.includes('email')) {
        const email = attributes.find((attr: AttributeType) => attr.Name === 'email'); // assumed completion
        if (email) params.Username = email.Value;
      }
      // Pace the create calls through the limiter (assumed completion;
      // the original excerpt was cut off here).
      await limiter.schedule(() => cognito.adminCreateUser(params).promise());
    }
  });
  readStream.pipe(parser);
};
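A hypothetical invocation, assuming an AWS SDK v2 client (the region, pool id, and file path are illustrative):

// Hypothetical caller: restores users from a backup file, with the
// limiter above pacing AdminCreateUser to one call every two seconds.
const AWS = require('aws-sdk')
const cognito = new AWS.CognitoIdentityServiceProvider({ region: 'us-east-1' })
restoreUsers(cognito, 'us-east-1_EXAMPLE', './backup/users.json')
  .catch(err => console.error('restore failed:', err))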
constructor(token: string) {
  this.resetCache();
  this._token = token;
  this._github = new GitHub({
    // version: '3.0.0',
    protocol: 'https',
  });
  this._github.authenticate({type: 'oauth', token: token});
  // TODO: Make the arguments to rate limiter configurable.
  // Bottleneck v1 positional form: (maxConcurrent, minTime in ms).
  this._cloneRateLimiter = new Bottleneck(20, 100);
  this._cloneOptions = {
    fetchOpts: {
      callbacks: {
        // Accept any TLS certificate presented during the clone.
        certificateCheck() {
          return 1;
        },
        // Authenticate clones with the OAuth token as the password.
        credentials(_url: string, _userName: string) {
          return nodegit.Cred.userpassPlaintextNew(token, 'x-oauth-basic');
        }
      }
    }
  };
}
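For reference, `new Bottleneck(20, 100)` is the v1 positional signature; under Bottleneck v2 the equivalent limiter takes an options object. A sketch, not a drop-in change for this codebase:

const Bottleneck = require('bottleneck')

// v2 equivalent of the v1 call above: at most 20 concurrent clones,
// started at least 100ms apart.
const cloneRateLimiter = new Bottleneck({ maxConcurrent: 20, minTime: 100 })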
constructor(
  options: DiscourseFetchOptions,
  // fetchImplementation shouldn't be provided by clients, but is convenient for testing.
  fetchImplementation?: typeof fetch,
  // Used to avoid going over the Discourse API rate limit
  minTimeMs?: number
) {
  this.options = options;
  const minTime = NullUtil.orElse(
    minTimeMs,
    (1000 * 60) / MAX_API_REQUESTS_PER_MINUTE
  );
  // n.b. the rate limiting isn't programmatically tested. However, it's easy
  // to tell when it's broken: try to load a nontrivial Discourse server, and see
  // if you get a 429 failure.
  const limiter = new Bottleneck({minTime});
  const unlimitedFetch = NullUtil.orElse(fetchImplementation, fetch);
  this._fetchImplementation = limiter.wrap(unlimitedFetch);
}
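`limiter.wrap(fn)` returns a function with the same call signature as `fn` whose invocations are scheduled through the limiter, so the rest of the class can use `this._fetchImplementation` exactly as it would plain `fetch`. A minimal self-contained sketch:

const Bottleneck = require('bottleneck')

// wrap() preserves the wrapped function's signature while spacing
// calls at least minTime apart.
const limiter = new Bottleneck({ minTime: 1000 })
const slowDouble = limiter.wrap(async n => n * 2)

// At most one of these runs per second; both still resolve normally.
slowDouble(1).then(console.log) // 2
slowDouble(2).then(console.log) // 4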
        break;
      case 'Working draft or equivalent':
        normalized = 'working-draft-or-equivalent';
        break;
      default:
        validateWarning(`Unmapped standardization status: ${status}`);
        normalized = 'invalid';
        break;
    }
    feature.spec_status = normalized;
  });
}
// Bugzilla has a limit on concurrent connections. I haven't found what the
// limit is, but 20 seems to work.
const bugzillaBottleneck = new Bottleneck(20);

function bugzillaFetch(bugzillaUrl) {
  return bugzillaBottleneck.schedule(cache.readJson, bugzillaUrl);
}

function getBugzillaBugData(bugId, options) {
  const includeFields = options.include_fields.join(',');
  return bugzillaFetch(`https://bugzilla.mozilla.org/rest/bug?id=${bugId}&include_fields=${includeFields}`)
    .then((json) => {
      if (!json.bugs.length) {
        throw new Error('Bug not found (secure bug?)');
      }
      return json.bugs[0];
    })
    .catch((reason) => {
      validateWarning(`Failed to get bug data for ${bugId}: ${reason}`);
    });
}
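`schedule(fn, ...args)` forwards its trailing arguments to `fn`, which is how `cache.readJson` receives the URL above without an extra closure. A minimal sketch (using v2's options-object constructor and a stand-in for `cache.readJson`):

const Bottleneck = require('bottleneck')

// schedule() passes trailing arguments straight to the task function.
const bottleneck = new Bottleneck({ maxConcurrent: 20 })
const readJson = async url => ({ url }) // stand-in for cache.readJson
bottleneck.schedule(readJson, 'https://bugzilla.mozilla.org/rest/bug?id=1')
  .then(json => console.log(json.url))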
const app = express()
app.use(compression())
app.use(cors())
app.set('trust proxy', JSON.parse(process.env.TRUST_PROXY || 'false'))

const elastic = new elasticsearch.Client({
  apiVersion: '5.5',
  host: process.env.ELASTICSEARCH_URL,
})

const esLimiter = new Bottleneck({
  maxConcurrent: JSON.parse(process.env.MAX_CONCURRENT_ES_REQUESTS || '100'),
  highWater: JSON.parse(process.env.MAX_QUEUED_ES_REQUESTS || '1000'),
  strategy: Bottleneck.strategy.OVERFLOW,
})
esLimiter.on('error', error => {
  logger.error(error)
})

const warnRequestTimedOut = throttledWarning(n => `${n} ES requests timed out`, 60000)
const warnRequestDropped = throttledWarning(n => `${n} ES requests dropped`, 60000)

const scheduleElasticsearchRequest = fn => {
  return new Promise((resolve, reject) => {
    let canceled = false
    // If a task sits in the queue for more than 30s, cancel it and notify the user.
    const timeout = setTimeout(() => {
      canceled = true
      ;(async () => {
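The excerpt cuts off inside the timeout callback, but the intent is clear from context: OVERFLOW silently drops jobs past highWater, so the code adds its own 30-second queue timeout on top. A hedged sketch of how such a wrapper could finish (assumed completion, not the project's actual code):

// Assumed completion of the queue-timeout pattern: reject the caller
// after 30s in the queue, and make the job a no-op if already canceled.
const scheduleWithTimeout = fn =>
  new Promise((resolve, reject) => {
    let canceled = false
    const timeout = setTimeout(() => {
      canceled = true
      warnRequestTimedOut()
      reject(new Error('ES request timed out in queue'))
    }, 30000)
    esLimiter
      .schedule(async () => {
        if (canceled) return // already timed out; skip the work
        clearTimeout(timeout)
        return fn()
      })
      .then(resolve, reject)
  })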
// Forward every received webhook event to the app
this.webhook.on('*', async (event: Webhooks.WebhookEvent) => {
  await this.receive(event)
})
// Log all webhook errors
this.webhook.on('error', this.errorHandler)

if (options.redisConfig || process.env.REDIS_URL) {
  let client
  if (options.redisConfig) {
    client = new Redis(options.redisConfig)
  } else if (process.env.REDIS_URL) {
    client = new Redis(process.env.REDIS_URL)
  }
  const connection = new Bottleneck.IORedisConnection({ client })
  connection.on('error', this.logger.error)
  this.throttleOptions = {
    Bottleneck,
    connection
  }
}
}
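These throttleOptions are handed to the GitHub client so every limiter it creates shares the single Redis connection. The same pattern in isolation (connection shared by two limiters; the Redis URL and limiter ids are illustrative):

const Bottleneck = require('bottleneck')
const Redis = require('ioredis')

// One Redis connection can back many limiters; each limiter's id keys
// its state, so limits are shared across processes and survive restarts.
const client = new Redis(process.env.REDIS_URL || 'redis://localhost:6379')
const connection = new Bottleneck.IORedisConnection({ client })
connection.on('error', err => console.error(err))

const searchLimiter = new Bottleneck({ id: 'search', minTime: 2000, datastore: 'ioredis', connection })
const coreLimiter = new Bottleneck({ id: 'core', minTime: 100, datastore: 'ioredis', connection })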