// ipfs-companion linkify experiment: serialize DOM linkification work
const PQueue = require('p-queue') // v5-style import; v6 exposes it as .default

function init () {
  // Linkify jobs are executed one by one
  // (fixes race conditions in huge DOMs without locking the UI)
  const linkifyJobs = new PQueue({ concurrency: 1 })
  // console.log('[ipfs-companion] running Linkify experiment')
  linkifyContainer(document.body, linkifyJobs)
    .then(() => {
      // console.log('[ipfs-companion] registering MutationObserver for Linkify experiment')
      new MutationObserver(function (mutations) {
        mutations.forEach((mutation) => linkifyMutation(mutation, linkifyJobs))
      }).observe(document.body, {
        characterData: true,
        childList: true,
        subtree: true
      })
    })
}
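
// A minimal sketch of the pattern above, assuming p-queue v6 (older
// releases export PQueue directly from require('p-queue')): with
// concurrency: 1, queued async jobs run strictly one after another,
// but the event loop stays free between them.
const { default: PQueue } = require('p-queue')

const jobs = new PQueue({ concurrency: 1 })
jobs.add(async () => console.log('first'))  // starts immediately
jobs.add(async () => console.log('second')) // starts after 'first' settles
jobs.onIdle().then(() => console.log('all jobs finished'))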

// DNSLink resolver (ipfs-companion): an LRU cache in front of bounded
// background lookups
const LRU = require('lru-cache')
const PQueue = require('p-queue')

module.exports = function createDnslinkResolver (getState) {
  // DNSLink lookup result cache
  const cacheOptions = { max: 1000, maxAge: 1000 * 60 * 60 * 12 }
  const cache = new LRU(cacheOptions)
  // upper bound for concurrent background lookups done by resolve(url)
  const lookupQueue = new PQueue({ concurrency: 4 })
  // cache and queue used for preloading DNSLink data
  const preloadUrlCache = new LRU(cacheOptions)
  const preloadQueue = new PQueue({ concurrency: 4 })

  const dnslinkResolver = {
    get _cache () {
      return cache
    },
    setDnslink (fqdn, value) {
      cache.set(fqdn, value)
    },
    clearCache () {
      cache.reset()
    }
    // ...remaining resolver methods truncated...
  }

  return dnslinkResolver
}
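
// Hedged sketch (not the extension's actual method) of the
// cache-then-queue lookup this setup enables; lookupDnslinkViaApi is a
// hypothetical helper standing in for the real DNS TXT lookup.
const resolveCached = (fqdn, cache, lookupQueue) => {
  const cached = cache.get(fqdn)
  if (cached !== undefined) return Promise.resolve(cached)
  return lookupQueue.add(async () => {
    const value = await lookupDnslinkViaApi(fqdn) // hypothetical
    cache.set(fqdn, value)
    return value
  })
}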

// Track downloader fragment (CloudMan); the code that builds
// playlistList is truncated above
    playlistList.push({
      name: '[Album] ' + albumInfo.album.name,
      trackIds
    })
  }
  logger.info('Download list:')
  playlistList.forEach((item) => logger.info('  ' + item.name))
  logger.initBar(Object.keys(trackList).length)
}

// Track processing: downloads run in parallel, copies are serialized
const trackDownloadQueue = new PQueue({ concurrency: config('trackDownloadConcurrency', 3) })
const trackCopyQueue = new PQueue({ concurrency: 1 })
for (let trackId in trackList) {
  trackId = parseInt(trackId, 10)
  let trackInfo = trackList[trackId]
  trackDownloadQueue.add(async () => {
    const tmpPath = os.tmpdir()
    const realPath = path.resolve(__root, sha1(trackId).substr(0, 2))
    const savePath = path.resolve(tmpPath, 'CloudMan/', sha1(trackId).substr(0, 2))
    if (that.downloaded.has(trackId)) {
      logger.info(`Track ${trackId} already downloaded, skipping.`)
      trackList[trackId].done = true
      trackList[trackId].format = that.downloadedFormat[trackId]
      logger._bar.tick(1)
      return
    }
    // ...download and tagging logic truncated...
  })
}
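
// Hedged sketch of the two-stage pattern above: several downloads run
// concurrently, while the copy step funnels through a concurrency-1
// queue so only one file lands in the library at a time.
// downloadTrack and copyToLibrary are hypothetical helpers.
const downloadQueue = new PQueue({ concurrency: 3 })
const copyQueue = new PQueue({ concurrency: 1 })

const processTrack = (trackId) =>
  downloadQueue.add(async () => {
    const tmpFile = await downloadTrack(trackId)      // hypothetical
    await copyQueue.add(() => copyToLibrary(tmpFile)) // hypothetical
  })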

// Parcel compression plugin: gzip/brotli-compress bundle output through
// a bounded queue (in the full source this block sits inside an async
// handler, hence the awaits)
try {
  const explorer = cosmiconfig('compress');
  const { config: { gzip, brotli, test, threshold } } = (await explorer.search()) || { config: defaultOptions };
  const fileTest = new RegExp(test);

  // Walk the bundle tree, yielding every file name that matches the pattern
  function* filesToCompress(bundle) {
    if (bundle.name && fileTest.test(bundle.name)) {
      yield bundle.name;
    }
    for (const child of bundle.childBundles) {
      yield* filesToCompress(child);
    }
  }

  const queue = new pQueue({ concurrency: defaultOptions.concurrency });

  [...filesToCompress(bundle)].forEach(file => {
    queue.add(() => gzipCompress(file, { ...defaultOptions.gzip, threshold, ...gzip }));
    queue.add(() => brotliCompress(file, { ...defaultOptions.brotli, threshold, ...brotli }));
  });

  await queue.onIdle();

  const end = new Date().getTime();
  const formattedOutput = output.sort(sortResults).map(formatResults);

  console.log(chalk.bold.green(`\n✨ Compressed in ${((end - start) / 1000).toFixed(2)}s.\n`));
  table(formattedOutput);
} catch (err) {
  console.error(chalk.bold.red('❌ Compression error:\n'), err);
}
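
// A self-contained sketch of the same idea using only Node core APIs
// plus p-queue: gzip a list of files side by side, at most two at a
// time.
const { createReadStream, createWriteStream } = require('fs')
const { createGzip } = require('zlib')
const { pipeline } = require('stream')
const { promisify } = require('util')
const { default: PQueue } = require('p-queue')

const pipe = promisify(pipeline)

const compressAll = async (files) => {
  const queue = new PQueue({ concurrency: 2 })
  files.forEach(file => {
    queue.add(() => pipe(
      createReadStream(file),
      createGzip(),
      createWriteStream(`${file}.gz`)
    ))
  })
  await queue.onIdle()
}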

// Dump loop fragment (class method): the queue throttles writes by
// concurrency, by interval, and by a per-interval cap
async _loop (limit, offset, totalWrites) {
  const queue = new PQueue({
    concurrency: this.options.concurrency || Infinity,
    interval: this.options.concurrencyInterval || 0,
    intervalCap: this.options.intervalCap || Infinity,
    carryoverConcurrencyCount: this.options.carryoverConcurrencyCount || false
  })
  return this.__looper(limit, offset, totalWrites, queue)
    .then(totalWrites => {
      this.log(`Total Writes: ${totalWrites}`)
      this.log('dump complete')
      return totalWrites
    })
    .catch(err => {
      this.emit('error', err)
      this.log(`Total Writes: ${totalWrites}`)
      this.log(`dump ended with error (get phase) => ${String(err)}`)
      throw err
    })
}
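
// Sketch of the rate-limiting options used above (p-queue v6): at most
// `intervalCap` tasks are started per `interval` window, independently
// of the `concurrency` bound.
const { default: PQueue } = require('p-queue')

const throttled = new PQueue({
  concurrency: 5,     // at most 5 tasks in flight
  interval: 1000,     // window length in ms
  intervalCap: 10,    // at most 10 tasks started per window
  carryoverConcurrencyCount: true // running tasks count against the next window
})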

// Compiled TypeScript constants from a VS Code extension
const p_queue_1 = require("p-queue");

/**
 * Is Open BSD or not.
 */
exports.IS_OPEN_BSD = process.platform === 'openbsd';
/**
* Is Sun OS or not.
*/
exports.IS_SUNOS = process.platform === 'sunos';
/**
* Is Windows or not.
*/
exports.IS_WINDOWS = process.platform === 'win32';
/**
* Global execution queue, which only allows one execution at the same time.
*/
exports.QUEUE = new p_queue_1.default({
    autoStart: true,
    concurrency: 1,
});
/**
* Stores global data for the current extension session.
*/
exports.SESSION = {};
/**
* Disposes 'SESSION', by removing its data.
*/
exports.SESSION_DISPOSER = {
    /** @inheritdoc */
    dispose: () => {
        for (const P of Object.keys(exports.SESSION)) {
            delete exports.SESSION[P];
        }
    }
};
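
// Hedged sketch of how such a global queue might be used: long-running
// actions funnel through QUEUE, so only one executes at a time no
// matter where it is triggered from. deployWorkspace is a hypothetical
// action.
const runExclusive = (action) => exports.QUEUE.add(action);

runExclusive(async () => {
    await deployWorkspace(); // hypothetical
});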

// LocalStack test-container fragment: the container creation options
// are truncated above; once the container starts, wait until every
// requested AWS service reports ready
    'LAMBDA_DOCKER_NETWORK=host'
  ]
});

await container.start();
const promise = localstackReady(container);

environment.set('AWS_ACCESS_KEY_ID', 'bogus');
environment.set('AWS_SECRET_ACCESS_KEY', 'bogus');
environment.set('AWS_REGION', 'us-east-1');

const containerData = await container.inspect();
const host = await getHostAddress();
const mappedServices = mapServices(host, containerData.NetworkSettings.Ports, services);
await promise;

// An unbounded queue is effectively Promise.all: all readiness checks
// run in parallel
const pQueue = new PQueue({ concurrency: Number.POSITIVE_INFINITY });
await pQueue.addAll(services.map(serviceName => async () => {
  const service = mappedServices[serviceName];
  await waitForReady(service, () => service.isReady(service.client));
}));

return {
  mappedServices,
  cleanup: () => {
    environment.restore();
    return container.stop();
  }
};
}
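
// Minimal sketch of what a waitForReady-style helper could look like
// (hypothetical; the real implementation is not shown in this
// fragment): poll an async check until it succeeds or a deadline passes.
const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));

const waitUntilReady = async (check, { timeout = 30000, delay = 500 } = {}) => {
  const deadline = Date.now() + timeout;
  for (;;) {
    try {
      return await check();
    } catch (err) {
      if (Date.now() > deadline) throw err;
      await sleep(delay);
    }
  }
};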

// Save every version's tarball for a package through a shared,
// lazily-created queue
let queue // module-level, shared across calls

const saveTarballs = (pkg, ipfs, options) => {
  if (!queue) {
    queue = new PQueue({ concurrency: options.request.concurrency })
  }

  return Promise.all(
    Object.keys(pkg.versions || {})
      .map(versionNumber => {
        return queue.add(async () => {
          try {
            await saveTarball(pkg, versionNumber, ipfs, options)
          } catch (err) {
            log(`💥 Error storing tarball ${pkg.name} ${versionNumber}`, err)
          }
        })
      })
  )
}
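
// The pattern above in isolation: queue.add returns a promise for the
// task's completion, so Promise.all over the added tasks settles once
// every queued job is done, while the queue still caps how many run at
// once. A hedged, generic sketch:
const { default: PQueue } = require('p-queue')

const runBounded = (items, worker, concurrency = 4) => {
  const q = new PQueue({ concurrency })
  return Promise.all(items.map(item => q.add(() => worker(item))))
}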

// File utility constructor (Cypress): access to the file is serialized
// through a concurrency-1 queue and guarded by a lock file
constructor (options = {}) {
  if (!options.path) {
    throw new Error('Must specify path to file when creating new FileUtil()')
  }

  this.path = options.path

  this._lockFileDir = path.join(os.tmpdir(), 'cypress')
  this._lockFilePath = path.join(this._lockFileDir, `${md5(this.path)}.lock`)

  this._queue = new pQueue({ concurrency: 1 })

  this._cache = {}
  this._lastRead = 0

  // release the lock file even if the process exits unexpectedly
  exit.ensure(() => {
    return lockFile.unlockSync(this._lockFilePath)
  })
}
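
// Hedged sketch (not the actual FileUtil API) of why a concurrency-1
// queue suits a file utility: every read and write funnels through the
// queue, so concurrent callers never interleave partial operations.
const fs = require('fs')

class SerializedFile {
  constructor (filePath, queue) {
    this.path = filePath
    this._queue = queue // a PQueue created with { concurrency: 1 }
  }

  read () {
    return this._queue.add(() => fs.promises.readFile(this.path, 'utf8'))
  }

  write (contents) {
    return this._queue.add(() => fs.promises.writeFile(this.path, contents))
  }
}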