Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Socket handler: remove one entry from the room's play queue.
// Non-admins may only remove their own entries; admins may remove any.
[QUEUE_REMOVE]: async (sock, { payload }, acknowledge) => {
let whereClause = sql`queueId = ${payload.queueId} AND roomId = ${sock.user.roomId}`
// admins can remove any
if (!sock.user.isAdmin) {
// NOTE(review): `+=` on a sql`` tagged template likely coerces the template
// object to a plain string, which may drop its bound parameters — confirm
// the sql helper supports this, or compose with a nested sql`` fragment.
whereClause += sql` AND userId = ${sock.user.userId}`
}
const query = sql`
DELETE FROM queue
WHERE ${whereClause}
`
// res.stmt.changes: number of rows the DELETE affected (sqlite driver)
const res = await db.run(String(query), query.parameters)
if (!res.stmt.changes) {
// nothing matched: bad queueId, wrong room, or not the caller's own entry
return acknowledge({
type: QUEUE_REMOVE + '_ERROR',
error: 'Could not remove queueId: ' + payload.queueId,
})
}
// success
acknowledge({ type: QUEUE_REMOVE + '_SUCCESS' })
// tell room
sock.server.to(sock.user.roomId).emit('action', {
type: QUEUE_PUSH,
payload: await Queue.get(sock.user.roomId)
})
// NOTE(review): handler body appears truncated below this point in this chunk.
DELETE FROM media WHERE mediaId IN (
SELECT media.mediaId FROM media LEFT JOIN paths USING(pathId) WHERE paths.pathId IS NULL
)
`)
// NOTE(review): fragment — the db.run(`...`) call closed above was opened
// before this chunk; `res` is declared there as well.
log.info(`cleanup: ${res.stmt.changes} media in nonexistent paths`)
// remove stars for nonexistent songs
res = await db.run(`
DELETE FROM songStars WHERE songId IN (
SELECT songStars.songId FROM songStars LEFT JOIN songs USING(songId) WHERE songs.songId IS NULL
)
`)
log.info(`cleanup: ${res.stmt.changes} stars for nonexistent songs`)
log.info('cleanup: vacuuming database')
// reclaim disk space freed by the orphan-row deletes above
await db.run('VACUUM')
}
// client should resize before uploading to be
// well below this limit, but just in case...
if (img.length > IMG_MAX_LENGTH) {
// reject oversized uploads with HTTP 413 (Payload Too Large)
ctx.throw(413, 'Invalid image')
}
fields.set('image', img)
}
// NOTE(review): fragment — the enclosing function's signature and the
// declarations of `fields`, `username`, `newPassword` are above this chunk.
// Build a parameterized INSERT from the collected field map.
const query = sql`
INSERT INTO users ${sql.tuple(Array.from(fields.keys()).map(sql.column))}
VALUES ${sql.tuple(Array.from(fields.values()))}
`
await db.run(String(query), query.parameters)
// success! return the (cleaned) credentials for sign-in
return {
username,
password: newPassword,
}
}
static async update (media) {
const { mediaId } = media
if (!Number.isInteger(mediaId)) {
throw new Error(`invalid mediaId: ${mediaId}`)
}
// currently uses an Object instead of Map
delete media.mediaId
const query = sql`
UPDATE media
SET ${sql.tuple(Object.keys(media).map(sql.column))} = ${sql.tuple(Object.values(media))}
WHERE mediaId = ${mediaId}
`
await db.run(String(query), query.parameters)
}
// Look up any existing wiki override for this guild+channel, then upsert it.
// SECURITY FIX: the original interpolated guildID/channelID directly into the
// query string (SQL injection); use `?` placeholders like the sibling
// sql.run(...) calls already do.
sql.get('SELECT * FROM overrides WHERE guildID = ? AND channelID = ?', [msg.guild.id, msg.channel.id]).then(row => {
if (row) {
sql.run('UPDATE overrides SET wiki=? WHERE guildID=? AND channelID=?', [wiki, msg.guild.id, msg.channel.id]);
} else {
sql.run('INSERT INTO overrides (guildID, channelID, wiki) VALUES (?,?,?)', [msg.guild.id, msg.channel.id, wiki]);
}
}).then(() => msg.reply(`The wiki override for channel ${msg.channel.name} is now set to ${wiki}`)).catch(console.error);
}
// NOTE(review): fragment — the promise chain closed here was opened above
// this chunk; `r` presumably comes from that scope (verify against caller).
}).catch(error => {
console.error(error);
// on failure, (re)create the medals table and seed this user with 0 medals
// NOTE(review): the inner sql.run promises are not awaited and have no
// rejection handlers — confirm fire-and-forget is intended here.
sql.run('CREATE TABLE IF NOT EXISTS medals (userId TEXT, medals INTEGER)').then(() => {
sql.run('INSERT INTO medals (userId, medals) VALUES (?, ?)', [r.users.last().id, 0]);
});
});
}
// Ensure the guilds/overrides tables exist, then register the new guild id.
sql.run('CREATE TABLE IF NOT EXISTS guilds (id TEXT, mainWiki TEXT, broadcastChannel TEXT)').then(() => {
sql.run('CREATE TABLE IF NOT EXISTS overrides (guildID TEXT, channelID TEXT, wiki TEXT)').then(() => {
sql.run('INSERT INTO guilds (id) VALUES (?)', [guild.id]);
});
});
});
/**
 * Persist an (ip, user) bridge pairing and return the stored record.
 * Updates the matching row when one exists, otherwise inserts a new one.
 *
 * @param {string} ip - bridge IP address
 * @param {string} user - bridge user token
 * @returns {Promise<Object>} the bridge row, reloaded via getBridge
 */
async function saveBridge(ip, user) {
  const existing = await db.get('SELECT * FROM bridges ' +
    'WHERE ip = ? AND user = ?', ip, user);

  // db.get yields undefined on a miss, so a row is present iff it's an object
  if (typeof existing !== 'object') {
    await db.run('INSERT INTO bridges (user, ip) VALUES (?, ?)', user, ip);
    return getBridge();
  }

  // refresh the stored user for this ip, then reload the row by id
  await db.run('UPDATE bridges SET user = ? WHERE ip = ?', user, ip);
  return getBridge(existing.id);
}
// NOTE(review): fragment — tail of a per-row insert helper whose signature
// and the origin of `rowVals` are above this chunk.
return insertStmt.run(rowVals)
}
// Commit the in-flight transaction; unless this is the final batch, open a
// fresh one immediately so subsequent inserts remain inside a transaction.
// Resolves to null on the final commit, otherwise to the `begin` result.
const commitBatch = async (isFinal) => {
  await db.run('commit')
  return isFinal ? null : db.run('begin')
}
/*
 * TODO: multiple sources indicate wrapping inserts in a transaction is key to getting
 * decent bulk load performance.
 * We're currently wrapping all inserts in one huge transaction. Should probably break
 * this into more reasonable (50K rows?) chunks.
 */
// NOTE(review): fragment — the enclosing function and its `try {` opener are
// above this chunk. Recreates the target table, then streams the CSV through
// a prepared insert inside a single transaction (see TODO above).
await db.run(dropStmt)
await db.run(createStmt)
log.log('table created')
await db.run('begin')
const insertStmt = await db.prepare(insertStmtStr)
const rowCount = await consumeStream(csv.fromPath(pathname, md.csvOptions),
insertRow(insertStmt), commitBatch, md.rowCount,
hasHeaderRow)
log.log('consumeStream completed, rowCount: ', rowCount)
// NOTE(review): finalize() is not awaited and is skipped when an error is
// thrown above — consider a finally block; verify against the driver's API.
insertStmt.finalize()
return md
} catch (err) {
log.error(err, err.stack)
// rethrow after logging so callers still observe the failure
throw err
}
}
static async remove (mediaIds) {
const batchSize = 999
while (mediaIds.length) {
const query = sql`
DELETE FROM media
WHERE mediaId IN ${sql.in(mediaIds.splice(0, batchSize))}
`
const res = await db.run(String(query), query.parameters)
log.info(`removed ${res.stmt.changes} media`)
}
}