async _processOne (entry) {
  const hash = entry.hash || entry
  if (this._store._oplog.has(hash) || this._fetching[hash]) {
    return
  }
  this._fetching[hash] = hash
  this.emit('load.added', entry)
  this._stats.tasksStarted += 1
  const exclude = []
  const log = await Log.fromEntryHash(this._store._ipfs, this._store.identity, hash, { logId: this._store._oplog.id, access: this._store.access, length: batchSize, exclude })
  this._buffer.push(log)
  const latest = log.values[0]
  delete this._queue[hash]
  // Mark this task as processed
  this._stats.tasksProcessed += 1
  // Notify subscribers that we made progress
  this.emit('load.progress', this._id, hash, latest, null, this._buffer.length)
  // Return all next pointers
  return log.values.map(getNext).reduce(flatMap, [])
}
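
// A minimal usage sketch (not part of the excerpt above): consuming the events
// that _processOne emits. `replicator` is an assumed name for the EventEmitter
// instance that owns _processOne.
replicator.on('load.added', (entry) => {
  console.log('queued:', entry.hash || entry)
})
replicator.on('load.progress', (id, hash, latest, progress, bufferLength) => {
  console.log(`buffered ${bufferLength} log(s), latest entry ${hash}`)
})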
// Variant of the same method against the earlier ipfs-log API, where
// Log.fromEntryHash took positional arguments instead of an options object.
async _processOne (entry) {
  const hash = entry.hash || entry
  if (this._store._oplog.has(hash) || this._fetching[hash]) {
    return
  }
  this._fetching[hash] = hash
  this.emit('load.added', entry)
  this._stats.tasksStarted += 1
  const exclude = []
  const log = await Log.fromEntryHash(this._store._ipfs, hash, this._store._oplog.id, batchSize, exclude, this._store.key, this._store.access.write)
  this._buffer.push(log)
  const latest = log.values[0]
  delete this._queue[hash]
  // Mark this task as processed
  this._stats.tasksProcessed += 1
  // Notify subscribers that we made progress
  this.emit('load.progress', this._id, hash, latest, null, this._buffer.length)
  // Return all next pointers
  return log.values.map(getNext).reduce(flatMap, [])
}
if (this.options.onLoad) {
  await this.options.onLoad(this)
}
const localHeads = await this._cache.get(this.localHeadsPath) || []
const remoteHeads = await this._cache.get(this.remoteHeadsPath) || []
const heads = localHeads.concat(remoteHeads)
if (heads.length > 0) {
  this.events.emit('load', this.address.toString(), heads)
}
// Update the replication status from the heads
heads.forEach(h => this._recalculateReplicationMax(h.clock.time))
// Load the log
const log = await Log.fromEntryHash(this._ipfs, this.identity, heads.map(e => e.hash), {
  logId: this._oplog.id,
  access: this.access,
  sortFn: this.options.sortFn,
  length: amount,
  exclude: this._oplog.values,
  onProgressCallback: this._onLoadProgress.bind(this),
  timeout: fetchEntryTimeout
})
// Join the log with the existing log
await this._oplog.join(log, amount)
// Update the index
if (heads.length > 0) {
  await this._updateIndex()
}
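
// How this load path is typically driven from the public API (a sketch; `db`
// is an opened store instance, with listeners attached before load() runs):
db.events.on('load', (address, heads) => console.log('loading', address, 'from', heads.length, 'head(s)'))
db.events.on('ready', (address) => console.log('ready:', address))
await db.load()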
return new Promise((resolve, reject) => {
  if (hash && hash !== this.lastWrite && this._logs[channel]) {
    this.events[channel].emit('load', 'sync', channel);
    const oldCount = this._logs[channel].items.length;
    Log.fromIpfsHash(this._ipfs, hash).then((other) => {
      this._logs[channel].join(other).then((merged) => {
        // Only emit the event if something was added
        const joinedCount = this._logs[channel].items.length - oldCount;
        if (joinedCount > 0) {
          Cache.set(channel, hash);
          // Cache the payloads
          this._cacheOperations(this._logs[channel])
            .then(() => {
              this.events[channel].emit('sync', channel, hash);
              this.events[channel].emit('loaded', 'sync', channel);
              resolve();
            })
            .catch(reject);
        } else {
          this.events[channel].emit('loaded', 'sync', channel);
          resolve();
        }
      }).catch(reject);
    }).catch(reject);
  } else {
    // Nothing new to sync
    resolve();
  }
});

// Separate write-path fragment (the call that appends the entry is elided in
// this excerpt): persist the updated log's hash and cache the new payload.
  .then((res) => {
    Log.getIpfsHash(this._ipfs, this._logs[channel]).then((listHash) => {
      this.lastWrite = listHash;
      Cache.set(channel, listHash);
      // Cache the payload
      let op = JSON.parse(JSON.stringify(res.op));
      Object.assign(op, { hash: res.node.payload });
      if (op.key === null) Object.assign(op, { key: res.node.payload });
      this._cached.push(op);
      this.events[channel].emit('write', channel, listHash);
      resolve(res.node.payload);
    });
  })
  .catch(reject);

// Unrelated response-handling fragment (its enclosing callback is elided):
res.on('data', bufferData)
res.on('end', done)
const onProgress = (hash, entry, count, total) => {
  this._recalculateReplicationStatus(count, entry.clock.time)
  this._onLoadProgress(hash, entry)
}
// Fetch the entries
// Timeout 1 sec to only load entries that are already fetched (in order to not get stuck at loading)
const snapshotData = await loadSnapshotData()
if (snapshotData) {
  // Guard before dereferencing: only recalculate the replication maximum
  // when snapshot data actually exists (`maxClock` is a helper defined elsewhere)
  this._recalculateReplicationMax(snapshotData.values.reduce(maxClock, 0))
  const log = await Log.fromJSON(this._ipfs, this.identity, snapshotData, { access: this.access, sortFn: this.options.sortFn, length: -1, timeout: 1000, onProgressCallback: onProgress })
  await this._oplog.join(log)
  await this._updateIndex()
  this.events.emit('replicated', this.address.toString())
}
this.events.emit('ready', this.address.toString(), this._oplog.heads)
} else {
  // `else` branch of an outer check whose condition is elided in this excerpt
  throw new Error(`Snapshot for ${this.address} not found!`)
}
return this
}
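
// Sketch of the surrounding workflow, assuming the excerpt above comes from a
// store's loadFromSnapshot(): save a snapshot first, then restore from it,
// with listeners attached before the restore so no events are missed.
db.events.on('replicated', (address) => console.log('index rebuilt for', address))
db.events.on('ready', (address) => console.log('store ready:', address))
await db.saveSnapshot()
await db.loadFromSnapshot()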
const op = entry.payload.op
const mods = this.capabilities['moderators']
const members = this.capabilities['members']
const isMod = mods.includes(entry.identity.id)
const isMember = members.includes(entry.identity.id)
if (op === 'ADD') {
  // Anyone can add an entry if the thread is open
  if (!this._members) return await trueIfValidSig()
  // Not an open thread: any member or mod can add to the thread
  if (isMember || isMod) return await trueIfValidSig()
}
if (op === 'DEL') {
  const hash = entry.payload.value
  const delEntry = await entryIPFS.fromMultihash(this._ipfs, hash)
  // An id can delete its own entries
  if (delEntry.identity.id === entry.identity.id) return await trueIfValidSig()
  // Mods can't delete other mods' entries
  if (mods.includes(delEntry.identity.id)) return false
  // Mods can delete any other entries
  if (isMod) return await trueIfValidSig()
}
return false
}
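
// Illustration with hypothetical data (not from the source above): given
const capabilities = {
  moderators: ['did:3:mod'],
  members: ['did:3:alice', 'did:3:mod']
}
// an ADD from 'did:3:alice' passes the isMember check; a DEL by 'did:3:alice'
// targeting someone else's entry falls through to `return false`; and a DEL by
// 'did:3:mod' of alice's entry is allowed, but not of another mod's entry.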
sync(hash) {
  if (!hash || hash === this._lastWrite) {
    return Promise.resolve([]);
  }
  const oldCount = this._oplog.items.length;
  let newItems = [];
  this.events.emit('load', this.dbname);
  return Log.fromIpfsHash(this._ipfs, hash)
    .then((log) => this._oplog.join(log))
    .then((merged) => newItems = merged)
    .then(() => Log.getIpfsHash(this._ipfs, this._oplog))
    .then((hash) => Cache.set(this.dbname, hash))
    .then(() => this._index.updateIndex(this._oplog, newItems))
    .then(() => {
      if (newItems.length > 0) {
        this.events.emit('readable', this.dbname);
      }
    })
    .then(() => newItems)
}
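
// Typical call site (a sketch; `headHashFromPeer` is a hypothetical hash
// received over pubsub): attach the listener first, then merge.
store.events.on('readable', (dbname) => console.log(dbname, 'has new entries'))
store.sync(headHashFromPeer).then((newItems) => {
  console.log('merged', newItems.length, 'new item(s)')
})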
return Cache.loadCache(this.options.cacheFile).then(() => {
  const cached = Cache.get(this.dbname);
  if (cached) {
    return Log.fromIpfsHash(this._ipfs, cached)
      .then((log) => this._oplog.join(log))
      .then((merged) => this._index.updateIndex(this._oplog, merged))
      .then(() => this.events.emit('readable', this.dbname))
      .then(() => this.events);
  }
  return Promise.resolve(this.events);
});
}
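
// Sketch of a call site (the names `store` and `load` are assumed here): the
// method resolves with the store's EventEmitter once any cached log is joined.
store.events.on('readable', (dbname) => console.log(dbname, 'restored from cache'))
store.load().then(() => console.log('cache load complete'))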
// Orphaned tail of another promise chain (its head is elided in this excerpt):
// persist the current log's hash under the store's name.
  .then(() => Log.getIpfsHash(this._ipfs, this))
  .then((hash) => Cache.set(this.name, hash))