Here's an example of how you can do this by leveraging executeMany(), new in node-oracledb v2.2.0 (recently released), and a global temporary table to minimize round trips.
Given these objects:
-- Imagine this is the table you want to select from based on the common keys
create table t (
  common_key number,
  info       varchar2(50)
);

-- Add 10,000 rows with keys 1-10,000 and random data for info
insert into t (common_key, info)
select rownum,
  dbms_random.string('p', 50)
from dual
connect by rownum <= 10000;

commit;

-- Create a temp table
create global temporary table temp_t (
  common_key number not null
)
on commit delete rows;
The following should work:
const oracledb = require('oracledb');
const config = require('./dbConfig.js');

const startKey = 1000;
const length = 2000;

// Uses a promise to simulate async work.
function getListFromDatabaseA() {
  return new Promise((resolve) => {
    const list = [];
    const count = length - startKey;

    for (let x = 0; x < count; x += 1) {
      list.push(startKey + x);
    }

    resolve(list);
  });
}

// The list returned from A likely isn't in the right format for executeMany.
function reformatAsBinds(list) {
  const binds = [];

  for (let x = 0; x < list.length; x += 1) {
    binds.push({
      key: list[x]
    });
  }

  return binds;
}

async function runTest() {
  let conn;

  try {
    const listFromA = await getListFromDatabaseA();
    const binds = reformatAsBinds(listFromA);

    conn = await oracledb.getConnection(config);

    // Send the keys to the temp table with executeMany for a single round trip.
    // The data in the temp table will only be visible to this session and will
    // be deleted automatically at the end of the transaction.
    await conn.executeMany('insert into temp_t (common_key) values (:key)', binds);

    // Now get your common_key and info based on the common keys in the temp table.
    const result = await conn.execute(
      `select common_key, info
       from t
       where common_key in (
         select common_key
         from temp_t
       )
       order by common_key`
    );

    console.log('Got ' + result.rows.length + ' rows');
    console.log('Showing the first 10 rows');

    for (let x = 0; x < 10; x += 1) {
      console.log(result.rows[x]);
    }
  } catch (err) {
    console.error(err);
  } finally {
    if (conn) {
      try {
        await conn.close();
      } catch (err) {
        console.error(err);
      }
    }
  }
}

runTest();
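One note on the executeMany() call above: I let the driver work out the bind types from the data, but executeMany() also accepts an options object with a bindDefs property if you'd rather declare them explicitly (this can help when, for example, the first values could be null). A minimal sketch of the same insert with explicit definitions:

// Same insert as above, but with explicit bind definitions so the
// driver doesn't have to infer the type from the data itself.
await conn.executeMany(
  'insert into temp_t (common_key) values (:key)',
  binds,
  {
    bindDefs: {
      key: { type: oracledb.NUMBER }
    }
  }
);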
After I posted the solution above, I thought I should provide an alternative that keeps the keys going from Node.js to the DB "in memory", without a staging table. You'll have to run tests and review explain plans to see which option is best for you (it will depend on a number of factors).
Given these objects:
-- This is the same as before
create table t (
  common_key number,
  info       varchar2(50)
);

-- Add 10,000 rows with keys 1-10,000 and random data for info
insert into t (common_key, info)
select rownum,
  dbms_random.string('p', 50)
from dual
connect by rownum <= 10000;

commit;

-- But here, we use a nested table type instead of a temp table
create or replace type number_ntt as table of number;
This should work:
const oracledb = require('oracledb');
const config = require('./dbConfig.js');

const startKey = 1000;
const length = 2000;

// Uses a promise to simulate async work.
function getListFromDatabaseA() {
  return new Promise((resolve) => {
    const list = [];
    const count = length - startKey;

    for (let x = 0; x < count; x += 1) {
      list.push(startKey + x);
    }

    resolve(list);
  });
}

async function runTest() {
  let conn;

  try {
    const listFromA = await getListFromDatabaseA();
    const binds = {
      keys: {
        type: oracledb.NUMBER,
        dir: oracledb.BIND_IN,
        val: listFromA
      },
      rs: {
        type: oracledb.CURSOR,
        dir: oracledb.BIND_OUT
      }
    };

    conn = await oracledb.getConnection(config);

    // Now get your common_key and info based on the keys in the nested table.
    const result = await conn.execute(
      `declare
         type number_aat is table of number index by pls_integer;

         l_keys    number_aat;
         l_key_tbl number_ntt := number_ntt();
       begin
         -- Unfortunately, we have to bind in with this data type, but
         -- it can't be used as a table...
         l_keys := :keys;

         -- So we'll transfer the data to another array type that can. This
         -- variable's type was created at the schema level so that it could
         -- be seen by the SQL engine.
         for x in 1 .. l_keys.count
         loop
           l_key_tbl.extend();
           l_key_tbl(l_key_tbl.count) := l_keys(x);
         end loop;

         open :rs for
           select common_key, info
           from t
           where common_key in (
             select column_value
             from table(l_key_tbl)
           )
           order by common_key;
       end;`,
      binds
    );

    const resultSet = result.outBinds.rs;

    console.log('Showing the first 10 rows');

    for (let x = 0; x < 10; x += 1) {
      const row = await resultSet.getRow();
      console.log(row);
    }

    // Result sets should be closed when you're done with them.
    await resultSet.close();
  } catch (err) {
    console.error(err);
  } finally {
    if (conn) {
      try {
        await conn.close();
      } catch (err) {
        console.error(err);
      }
    }
  }
}

runTest();
The formatting for the binds was different here (a bit simpler). Also, because I was executing a PL/SQL block rather than plain SQL, I needed an out bind of type cursor/result set to get the rows back.
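As an aside, the loop above grabs a fixed 10 rows with getRow(). If you need to drain the whole cursor, fetching in batches with getRows() (another standard ResultSet method) is more typical. A sketch, with fetchAllRows being just an illustrative helper name:

// Drain an open ResultSet in batches. getRows(n) returns up to n rows,
// and a short (or empty) array once the cursor is exhausted.
async function fetchAllRows(resultSet) {
  const allRows = [];
  let rows;

  do {
    rows = await resultSet.getRows(100); // up to 100 rows per call
    allRows.push(...rows);
  } while (rows.length === 100);

  await resultSet.close(); // always close result sets when done

  return allRows;
}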
See this post regarding cardinality with nested tables:
http://www.oracle-developer.net/display.php?id=427
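The short version: the optimizer has no statistics for a collection, so it falls back on a default cardinality guess that can badly skew the plan when your key list is small. One common workaround covered in that post is the (undocumented, but widely used) cardinality hint on the table() expression. A sketch of how the query inside the PL/SQL block might be adjusted, assuming you typically bind around 1,000 keys:

open :rs for
  select common_key, info
  from t
  where common_key in (
    select /*+ cardinality(k, 1000) */ column_value
    from table(l_key_tbl) k
  )
  order by common_key;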
If you try both, please leave some feedback about which worked better.