(${sqlExpression} ${comparison} ${sqlCursors[i] || sql.null})
OR\
(\
${sqlExpression} = ${sqlCursors[i] || sql.null}\
AND\
${sqlOldFilter}\
)\
)`;
}
// Check that the cursor prefixes apply
// TODO:v5: we should be able to do this in JS-land rather than SQL-land
sqlFilter = sql.fragment`(((${sql.join(
getPgCursorPrefix(),
", "
)}) = (${sql.join(
rawPrefixes.map(val => sql.value(val)),
", "
)})) AND (${sqlFilter}))`;
queryBuilder.whereBound(sqlFilter, isAfter);
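/*
 * Illustrative sketch (hypothetical column names, not from this file): for a
 * connection ordered by (created_at ASC, id ASC) with an `after` cursor that
 * encodes the values ('2020-01-01', 42), the fragment built above expands to
 * SQL shaped roughly like:
 *
 *   (
 *     (created_at > '2020-01-01')
 *     OR (created_at = '2020-01-01' AND id > 42)
 *   )
 *   AND ((prefix_expr_1, prefix_expr_2) = ($1, $2))
 *
 * i.e. a keyset ("seek") predicate combined with a check that the cursor was
 * generated against the same ordering/prefix as the current query.
 */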
} else if (
cursorValue[0] === "natural" &&
typeof cursorValue[1] === "number" &&
isSafeInteger(cursorValue[1]) &&
cursorValue[1] >= 0
) {
// $FlowFixMe: we know this is a number
const cursorValue1: number = cursorValue[1];
if (isAfter) {
queryBuilder.offset(() => cursorValue1);
} else {
queryBuilder.limit(() => {
const offset = queryBuilder.getOffset();
return Math.max(0, cursorValue1 - offset - 1);
});
}
} else {
throw new Error("Cannot use 'before'/'after' without unique 'orderBy'");
}
});
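/*
 * Worked example for the "natural" branch above (assumed values, for
 * illustration only): a `before` cursor of ["natural", 10] with an existing
 * offset of 3 yields a limit of Math.max(0, 10 - 3 - 1) = 6. The Math.max
 * clamp keeps the limit from going negative when the cursor falls at or
 * before the current offset, producing an empty page instead.
 */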
const query = queryBuilder.build(options);
const haveFields = queryBuilder.getSelectFieldsCount() > 0;
const sqlQueryAlias = sql.identifier(Symbol());
const sqlSummaryAlias = sql.identifier(Symbol());
//
// Tables should ALWAYS push their PK onto the order stack; if this isn't
// present then we're either dealing with a view or a table without a PK.
// Either way, we don't have anything to guarantee uniqueness so we need to
// fall back to limit/offset.
//
// TODO: support unique keys in PgAllRows etc
// TODO: add a warning for cursor-based pagination when using the fallback
// TODO: if it is a view maybe add a warning encouraging pgViewUniqueKey
const canHaveCursorInWhere =
queryBuilder.getOrderByExpressionsAndDirections().length > 0 &&
queryBuilder.isOrderUnique();
const queryHasBefore =
queryBuilder.compiledData.whereBound.upper.length > 0;
const queryHasAfter = queryBuilder.compiledData.whereBound.lower.length > 0;
const queryHasZeroLimit = queryBuilder.getFinalLimit() === 0;
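/*
 * Illustrative sketch (hypothetical query shapes): an ordering that ends in
 * the primary key, e.g. `orderBy: [CREATED_AT_ASC, ID_ASC]`, is unique, so
 * canHaveCursorInWhere is true and before/after cursors can be compiled into
 * the WHERE clause (keyset pagination). Ordering a view, or a table with no
 * primary key, by a non-unique column leaves isOrderUnique() false, so the
 * fallback described above is used: "natural" cursors implemented with plain
 * limit/offset.
 */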
}${
nullsFirst === true
? sql.fragment` NULLS FIRST`
: nullsFirst === false
? sql.fragment` NULLS LAST`
: sql.blank
}`
),
","
)}`
: sql.blank
}
${(isSafeInteger(limit) && sql.fragment`limit ${sql.literal(limit)}`) ||
sql.blank}
${(offset && sql.fragment`offset ${sql.literal(offset)}`) || sql.blank}`;
if (flip) {
const flipAlias = Symbol();
fragment = sql.fragment`\
with ${sql.identifier(flipAlias)} as (
${fragment}
)
select *
from ${sql.identifier(flipAlias)}
order by (row_number() over (partition by 1)) desc`; /* We don't need to factor useAsterisk into this row_number() usage */
}
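/*
 * Illustrative sketch (hypothetical alias and ordering): for a `last: 3`
 * query the flipped fragment ends up shaped roughly like
 *
 *   with __flipped__ as (
 *     select ... order by created_at desc, id desc
 *     limit 3
 *   )
 *   select * from __flipped__
 *   order by (row_number() over (partition by 1)) desc
 *
 * i.e. fetch the final rows by reversing the order, then reverse them again
 * so the client still receives them in the originally requested order.
 */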
if (useAsterisk) {
/*
* NOTE[useAsterisk/row_number]: since LIMIT/OFFSET is inside this
* subquery, row_number() outside of this subquery WON'T include the
* offset. We must add it back wherever row_number() is used.
*/
const sqlCommon = sql.fragment`\
${sqlCommonUnbounded}
where ${queryBuilder.buildWhereClause(!invert, invert, options)}`;
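/*
 * Worked example for the NOTE above (assumed offset of 5): the subquery has
 * already skipped five rows, so a row_number() computed outside it reports 1
 * for what is really the sixth row overall. Any consumer that needs absolute
 * positions must therefore use row_number() + offset, i.e. add the 5 back.
 */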
/*
* Since the offset makes the diagram asymmetric, if offset === 0
* then the diagram is symmetric and things are simplified a little.
*/
const isForwardOrSymmetric = !invert || offset === 0;
if (!isForwardOrSymmetric) {
assert(invert);
assert(offset > 0);
// We're looking for a previous page, and there's an offset, so let's just
// assume there's a previous page where the offset is smaller.
return sql.literal(true);
} else if (canHaveCursorInWhere) {
assert(isForwardOrSymmetric);
if (!queryHasBefore && !queryHasFirst) {
assert(isForwardOrSymmetric);
// There can be no next page since there's no upper bound
return sql.literal(false);
} else if (queryHasBefore && !queryHasFirst) {
/*
* We invert the upper buildWhereBoundClause to only represent the data
* after `before`, then check if there's at least one record in that set.
*
* This only works if the `before` cursor can be represented in the
* SQL WHERE clause, otherwise we're doing limit/offset pagination
* which requires different logic. It also only works if there's no
* `first` clause, otherwise there could be a next page before the
* `before` clause.
*/
return sql.fragment`\
exists(
${sqlCommonUnbounded}
where ${queryBuilder.buildWhereClause(false, false, options)}
and not (${queryBuilder.buildWhereBoundClause(invert)})
)`;
} else if (invert) {
assert(offset === 0);
// Paginating backwards and there's no offset (which factors in before/after), so there's no previous page.
return sql.fragment`false`;
} else {
assert(!invert);
/*
* We're paginating forwards; either there's a before, there's a first,
* or both.
*
* We want to see if there's more than limit+offset records in sqlCommon.
*/
return sql.fragment`\
exists(
${sqlCommon}
offset ${sql.literal(limit + offset)}
)`;
}
}
}
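/*
 * Illustrative sketch (hypothetical values): for `first: 10` with an offset
 * of 5, the final branch above emits SQL shaped roughly like
 *
 *   exists(
 *     select ... where <cursor/where bounds>
 *     offset 15
 *   )
 *
 * If at least one row survives past limit + offset (10 + 5 = 15), there is a
 * next page; otherwise there is not.
 */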
const getPgCursorPrefix = (): SQL[] =>
*
* If you have a way to improve this, I'd love to see a PR - but please
* make sure that the integration tests pass with your solution first as
* there are a lot of potential pitfalls!
*/
const selectionField = isPgClassLike
? /*
* This `when foo is null then null` check might *seem* redundant, but it
* is not - e.g. the compound type `(,,,,,,,)::my_type` and
* `null::my_type` differ; however the former also returns true to `foo
* is null`. We use this check to coalesce both into the canonical `null`
* representation to make it easier to deal with below.
*/
sql.query`(case when ${sqlResultSourceAlias} is null then null else ${sqlResultSourceAlias} end)`
: outputArgNames != null // It's a record
? sql.query`array[${sql.join(
outputArgNames.map(
(outputArgName, idx) =>
sql.query`${sqlResultSourceAlias}.${sql.identifier(
// According to https://www.postgresql.org/docs/10/static/sql-createfunction.html,
// "If you omit the name for an output argument, the system will choose a default column name."
// In PG 9.x and 10, the column names appear to be assigned with a `column` prefix.
outputArgName !== "" ? outputArgName : `column${idx + 1}`
)}::text`
),
" ,"
)}]`
: sql.query`(${sqlResultSourceAlias}.${sqlResultSourceAlias})::${sqlTypeIdentifier}`;
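/*
 * Illustrative psql sketch (hypothetical composite type) of the distinction
 * the `case when ... is null` wrapper above guards against:
 *
 *   create type my_type as (a int, b int);
 *   select (null::my_type) is null;  -- true
 *   select ('(,)'::my_type) is null; -- also true, even though this is an
 *                                    -- "all fields null" composite rather
 *                                    -- than a null value
 *
 * Coalescing both shapes to a plain SQL null means later code only has to
 * handle one canonical representation.
 */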
const result = await performQuery(
pgClient,
sql.query`with ${sqlResultSourceAlias} as (${sqlMutationQuery}) select (${selectionField})::text from ${sqlResultSourceAlias}`
);
const chunkToJson = (fieldsChunk: [SQL, string][]) =>
sql.fragment`jsonb_build_object(${sql.join(
fieldsChunk.map(
([expr, alias]) =>
sql.fragment`${sql.literal(alias)}::text, ${expr}`
),
", "
)})`;
return sql.fragment`(${sql.join(
fieldsChunks.map(chunkToJson),
" || "
)})::json`;
} else {
// PG9.4 will have issues with more than 100 parameters (50 keys)
return sql.fragment`json_build_object(${sql.join(
fields.map(
([expr, alias]) => sql.fragment`${sql.literal(alias)}::text, ${expr}`
),
", "
)})`;
}
}
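/*
 * Minimal sketch (hypothetical helper, not from this file) of the chunking
 * that feeds `fieldsChunks` above. PostgreSQL limits function calls to 100
 * arguments, so each jsonb_build_object call can carry at most 50
 * alias/expression pairs; the per-chunk objects are then merged with `||`:
 *
 *   const chunk = (arr, size) => {
 *     const out = [];
 *     for (let i = 0; i < arr.length; i += size) {
 *       out.push(arr.slice(i, i + size));
 *     }
 *     return out;
 *   };
 *   const fieldsChunks = chunk(fields, 50);
 *
 * With 120 fields this produces three jsonb_build_object(...) calls joined
 * by `||` and cast to json, as in the branch above.
 */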
const seenKeys: Array<string> = [];
// TODO:perf: looping backwards is slow
for (let i = localSettings.length - 1; i >= 0; i--) {
const [key, value] = localSettings[i];
if (!seenKeys.includes(key)) {
seenKeys.push(key);
// Make sure that the third argument to set_config is always `true` so that
// we only ever set variables on the transaction.
// Also, we're using `unshift` to undo the reverse-looping we're doing
sqlSettings.unshift(sql.fragment`set_config(${sql.value(key)}, ${sql.value(value)}, true)`);
}
}
}
const sqlSettingsQuery =
sqlSettings.length > 0 ? sql.compile(sql.query`select ${sql.join(sqlSettings, ', ')}`) : null;
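/*
 * Illustrative sketch (hypothetical settings): for localSettings such as
 * [['role', 'app_user'], ['jwt.claims.user_id', '27']] the compiled query is
 * shaped like
 *
 *   select set_config($1, $2, true), set_config($3, $4, true)
 *
 * with keys and values passed as parameters. The third argument `true` makes
 * each setting local to the current transaction, which is why the presence
 * of sqlSettingsQuery forces a transaction below.
 */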
// If we can avoid transactions, we get greater performance.
const needTransaction =
pgForceTransaction ||
!!sqlSettingsQuery ||
(operationType !== 'query' && operationType !== 'subscription');
// Now that we've caught as many errors as we can at this stage, let's create a DB connection.
const withAuthenticatedPgClient: WithAuthenticatedPgClientFunction = !needTransaction
? simpleWithPgClient(pgPool)
: async cb => {
// Connect a new Postgres client
const pgClient = await pgPool.connect();
// Begin our transaction
await pgClient.query('begin');
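/*
 * Minimal sketch (assumption: the rest of this helper is not shown in this
 * excerpt) of how such a transaction-wrapping callback typically completes
 * after `begin`: apply the settings, run the callback, then commit or roll
 * back and release the client.
 *
 *   try {
 *     if (sqlSettingsQuery) {
 *       await pgClient.query(sqlSettingsQuery); // the set_config(..., true) calls
 *     }
 *     const result = await cb(pgClient);
 *     await pgClient.query('commit');
 *     return result;
 *   } catch (e) {
 *     await pgClient.query('rollback');
 *     throw e;
 *   } finally {
 *     pgClient.release();
 *   }
 */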