Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
decryption: {
type: 'object',
required: ['keys', 'roomId'],
properties: {
keys: {
type: 'array',
items: { type: 'string' }
},
roomId: { type: 'string' }
}
}
}
}
// Compile the message JSON schema once at module load (MESSAGE_FORMAT is defined above).
const validator = new Ajv()
const validateMessage = validator.compile(MESSAGE_FORMAT)
// Cap message-processing at 25 concurrent jobs.
const limiter = new Bottleneck({ maxConcurrent: 25 })
/**
* Origin Messaging Client
*
* To use:
*
* ```
* const messaging = new Messaging(options)
* await messaging.init(this.address)
* await messaging.startConversing()
* // Once ready:
* await messaging.sendConvMessage(aliceAddress, { content: 'Hi' })
* // Once someone else's messages have arrived
* const messages = messaging.getAllMessages(aliceAddress)
* ```
 */
// Bottleneck configuration; when redis options are present the limiter state
// is shared across processes via redis, otherwise it stays in-process.
const throttler_options = {
minTime: 10, // roughly 100 requests per second
// Derive a stable limiter id from a hash of the developer token so the raw
// token is never written to the shared datastore.
id:
'id' +
crypto
.createHash('md5')
.update(this.options.developer_token)
.digest('hex'), // don't want to leak dev token to redis
/* Clustering options */
datastore: this.options.redis_options ? 'redis' : 'local',
clearDatastore: false,
clientOptions: this.options.redis_options,
timeout: 1000 * 60 * 10, // 10 minutes
// NOTE(review): md5 is used here only as an opaque identifier, not for
// security-sensitive hashing.
}
this.throttler = new Bottleneck(throttler_options)
// Surface datastore connectivity problems instead of failing silently.
this.throttler.on('error', err => {
console.error('Could not connect to redis: ')
console.error(err)
})
}
/**
 * Restores users from a JSON backup file into the given Cognito user pool.
 * (Body is truncated in this view.)
 *
 * @param cognito - Cognito Identity Service Provider client.
 * @param UserPoolId - Target pool id; the sentinel 'all' is rejected.
 * @param file - Path to the JSON backup file to stream-parse.
 * @param password - Optional fixed password (use not visible in this view).
 * @param passwordModulePath - Optional path to a module loaded via require().
 *   NOTE(review): type should be lowercase `string`, not boxed `String`.
 * @param delayDurationInMillis - Delay between operations (default 0).
 */
export const restoreUsers = async (cognito: CognitoISP, UserPoolId: string, file: string, password?: string, passwordModulePath?: String, delayDurationInMillis: number = 0) => {
// NOTE(review): prefer `===` here; error message also has a typo ("a acceptable").
if (UserPoolId == 'all') throw Error(`'all' is not a acceptable value for UserPoolId`);
let pwdModule: any = null;
// Dynamically load the password-derivation module when a path is provided.
if (typeof passwordModulePath === 'string') {
pwdModule = require(passwordModulePath);
}
const { UserPool } = await cognito.describeUserPool({ UserPoolId }).promise();
const UsernameAttributes = UserPool && UserPool.UsernameAttributes || [];
// Space out admin calls: at most one every 2 seconds — presumably to stay
// under Cognito admin API rate limits.
const limiter = new Bottleneck({ minTime: 2000 });
const readStream = fs.createReadStream(file);
const parser = JSONStream.parse();
parser.on('data', async (data: any[]) => {
for (let user of data) {
// filter out non-mutable attributes
const attributes = user.Attributes.filter((attr: AttributeType) => attr.Name !== 'sub');
const params: AdminCreateUserRequest = {
UserPoolId,
Username: user.Username,
UserAttributes: attributes
};
// Set Username as email if UsernameAttributes of UserPool contains email
if (UsernameAttributes.includes('email')) {
/**
 * Sets up the GitHub REST client, OAuth authentication, the clone rate
 * limiter, and the libgit2 fetch options used for repository clones.
 *
 * @param token - GitHub OAuth token; also used as the git HTTPS credential.
 */
constructor(token: string) {
this.resetCache();
this._token = token;
this._github = new GitHub({
// version: '3.0.0',
protocol: 'https',
});
this._github.authenticate({type: 'oauth', token: token});
// TODO: Make the arguments to rate limiter configurable.
this._cloneRateLimiter = new Bottleneck(20, 100);
this._cloneOptions = {
fetchOpts: {
callbacks: {
// NOTE(review): returning 1 skips TLS certificate validation on git
// fetches — confirm this is intentional (historically needed for
// libgit2 on some platforms, but it weakens transport security).
certificateCheck() {
return 1;
},
// Authenticate git-over-HTTPS with the OAuth token as the username.
credentials(_url: string, _userName: string) {
return nodegit.Cred.userpassPlaintextNew(token, 'x-oauth-basic');
}
}
}
};
}
/**
 * Builds a fetcher whose outgoing requests are spaced out to stay within
 * the Discourse API rate limit.
 *
 * @param options - Connection options for the Discourse server.
 * @param fetchImplementation - Override for testing only; defaults to `fetch`.
 * @param minTimeMs - Minimum spacing between requests; defaults to the
 *   spacing implied by MAX_API_REQUESTS_PER_MINUTE.
 */
constructor(
  options: DiscourseFetchOptions,
  // fetchImplementation shouldn't be provided by clients, but is convenient for testing.
  fetchImplementation?: typeof fetch,
  // Used to avoid going over the Discourse API rate limit
  minTimeMs?: number
) {
  this.options = options;
  const baseFetch = NullUtil.orElse(fetchImplementation, fetch);
  const defaultSpacingMs = (1000 * 60) / MAX_API_REQUESTS_PER_MINUTE;
  const spacingMs = NullUtil.orElse(minTimeMs, defaultSpacingMs);
  // n.b. the rate limiting isn't programmatically tested. However, it's easy
  // to tell when it's broken: try to load a nontrivial Discourse server, and see
  // if you get a 429 failure.
  const rateLimiter = new Bottleneck({minTime: spacingMs});
  this._fetchImplementation = rateLimiter.wrap(baseFetch);
}
break;
// Map the human-readable standardization status onto its slug form.
case 'Working draft or equivalent':
normalized = 'working-draft-or-equivalent';
break;
// Unknown statuses are reported as warnings but do not abort processing.
default:
validateWarning(`Unmapped standardization status: ${status}`);
normalized = 'invalid';
break;
}
feature.spec_status = normalized;
});
}
// Bugzilla has a limit on concurrent connections. I haven't found what the
// limit is, but 20 seems to work.
// NOTE(review): `new Bottleneck(20)` is the Bottleneck v1 positional form
// (maxConcurrent = 20); v2 takes an options object — confirm which major
// version this project pins.
const bugzillaBottleneck = new Bottleneck(20);
// Fetch a Bugzilla REST URL (through the JSON cache) while respecting the
// shared concurrency limit above.
function bugzillaFetch(bugzillaUrl) {
return bugzillaBottleneck.schedule(cache.readJson, bugzillaUrl);
}
// Fetches a single bug record from the Bugzilla REST API.
// `options.include_fields` selects which bug fields the API returns.
// (The .catch handler is truncated in this view.)
function getBugzillaBugData(bugId, options) {
const includeFields = options.include_fields.join(',');
// NOTE(review): bugzillaFetch's signature takes only the URL; the second
// `options` argument passed here is ignored — confirm whether it was meant
// to be forwarded.
return bugzillaFetch(`https://bugzilla.mozilla.org/rest/bug?id=${bugId}&include_fields=${includeFields}`, options)
.then((json) => {
// An empty `bugs` array typically means the bug is access-restricted.
if (!json.bugs.length) {
throw new Error('Bug not found(secure bug?)');
}
return json.bugs[0];
})
.catch((reason) => {
validateWarning(`Failed to get bug data for: ${bugId}: ${reason}`);
// Lazily create — and cache — one throttler per rate tier, chaining it to the
// shared global throttler so both the per-tier and global limits apply.
// Caching uses lodash memoize's default resolver, i.e. keyed by `rate` only.
const getThrottler = _.memoize((rate, limit, globalThrottler) => {
  const tierThrottler = new Bottleneck({ minTime: limit[rate] });
  return tierThrottler.chain(globalThrottler);
});
import gnomadSchema from './schema'
import { UserVisibleError } from './schema/errors'
import logger, { throttledWarning } from './utilities/logging'
// Express app: gzip responses, permissive CORS, and proxy trust controlled
// via the TRUST_PROXY env var (JSON-parsed; defaults to false).
const app = express()
app.use(compression())
app.use(cors())
app.set('trust proxy', JSON.parse(process.env.TRUST_PROXY || 'false'))
const elastic = new elasticsearch.Client({
apiVersion: '5.5',
host: process.env.ELASTICSEARCH_URL,
})
// Bound concurrent and queued Elasticsearch requests; with the OVERFLOW
// strategy, jobs submitted while the queue is at highWater are dropped.
const esLimiter = new Bottleneck({
maxConcurrent: JSON.parse(process.env.MAX_CONCURRENT_ES_REQUESTS || '100'),
highWater: JSON.parse(process.env.MAX_QUEUED_ES_REQUESTS || '1000'),
strategy: Bottleneck.strategy.OVERFLOW,
})
esLimiter.on('error', error => {
logger.error(error)
})
// Aggregated warnings (60 s window, per utilities/logging) so a flood of ES
// failures doesn't flood the logs.
const warnRequestTimedOut = throttledWarning(n => `${n} ES requests timed out`, 60000)
const warnRequestDropped = throttledWarning(n => `${n} ES requests dropped`, 60000)
// (Function body truncated in this view.)
const scheduleElasticsearchRequest = fn => {
return new Promise((resolve, reject) => {
let canceled = false
/**
 * Wires up the exchange-specific adapter and a request throttler tuned to
 * that exchange's rate limit.
 *
 * @param exchange - Adapter key; must exist in the `adapters` map.
 * @param exchangeConnection - Live connection handle for the exchange.
 * @throws Error when no adapter is registered for `exchange`.
 */
constructor(exchange: string, private readonly exchangeConnection: Exchange) {
  if (!(exchange in adapters)) {
    throw new Error(`No adapter for ${exchange}.`)
  }
  const selectedAdapter = adapters[exchange]
  this.adapter = selectedAdapter
  this.scan = selectedAdapter.scan
  this.bottleneck = new Bottleneck({ minTime: selectedAdapter.ratelimit })
}
/**
 * Installs rate limiting on all requests issued through `octokit`: each
 * request first waits for a slot from the limiter before executing.
 *
 * @param octokit - The GitHub API client to throttle.
 * @param limiter - Optional shared Bottleneck instance; when omitted, a
 *   conservative default (one request at a time, 1 s minimum spacing) is
 *   created. Declared optional so the type matches the runtime guard that
 *   the original `limiter: Bottleneck` signature contradicted.
 */
export function addRateLimiting (octokit: GitHubAPI, limiter?: Bottleneck) {
  // Use a const local instead of reassigning the parameter.
  const activeLimiter = limiter || new Bottleneck({
    maxConcurrent: 1,
    minTime: 1000
  })
  const noop = () => Promise.resolve()
  // Gate (not wrap) each request: it proceeds once the limiter grants a slot.
  octokit.hook.before('request', () => activeLimiter.schedule(noop))
}