
I'm trying to do the following using Node.js and the 'oracledb' module:

  1. Consult Database A to get all bills released on a specific date.
  2. Assign the result to a variable list.
  3. Call .map() on list and, inside the callback, query Database B for the client's info (matched on a common key) for each item of list.

The problem is that the Database B requests all fire at once, so if there are 1,000 bills to map, only about 100 come back and the rest fail with errors. It is probably related to the number of simultaneous requests.

Given those details, I'd like to know if there's a way to split the requests into batches (e.g. 100 at a time), or any other solution.

P.S.: I apologize in advance for my mistakes, and for not being able to share my real code; the sketch below is only a rough simplification of the pattern.
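Roughly, the pattern looks like this (a simplified sketch only; the config files, queries, table names, and column names are placeholders, not my real ones):

const oracledb = require('oracledb');
const configA = require('./dbConfigA.js');   // hypothetical configs for the two databases
const configB = require('./dbConfigB.js');

// 1. Get all bills released on a specific date from Database A.
async function getBillsFromA(releaseDate) {
  const conn = await oracledb.getConnection(configA);

  try {
    const result = await conn.execute(
      'select common_key from bills where release_date = :d',
      [releaseDate]
    );
    return result.rows;
  } finally {
    await conn.close();
  }
}

// Look up a client's info in Database B by the common key.
async function getClientFromB(commonKey) {
  const conn = await oracledb.getConnection(configB);

  try {
    const result = await conn.execute(
      'select * from clients where common_key = :k',
      [commonKey]
    );
    return result.rows[0];
  } finally {
    await conn.close();
  }
}

async function run() {
  // 2. Assign the result to a variable list.
  const list = await getBillsFromA(new Date());

  // 3. Map over list and query Database B for each item. All of these
  // requests start at the same time -- with ~1000 bills only about 100
  // succeed and the rest come back as errors.
  const clients = await Promise.all(list.map(row => getClientFromB(row[0])));

  console.log(clients.length);
}

run().catch(console.error);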

J. Meyer
  • See the `concurrency` option in [Bluebird's `Promise.map()`](http://bluebirdjs.com/docs/api/promise.map.html). – jfriend00 Mar 22 '18 at 17:37
  • Other similar questions/answers: [multiple api calls hang after a while](https://stackoverflow.com/questions/48876048/node-and-socket-io-multiple-api-calls-hang-after-a-while/48914867#48914867), [request with a list of urls](https://stackoverflow.com/questions/47299174/nodejs-async-request-with-a-list-of-url/47299802#47299802), [million api calls](https://stackoverflow.com/questions/34802539/node-js-socket-explanation/34802932#34802932). – jfriend00 Mar 22 '18 at 17:41
  • I wonder if you are using 100 connections in parallel? This might be less than desirable. Overall it sounds like a task you could do in PL/SQL, if not in SQL and be much more efficient. – Christopher Jones Mar 23 '18 at 07:04
  • I agree with Chris. It's hard to say what's going on without seeing some code. How long does it take for the first 100 to succeed? What is the error that the rest get? Are you using a connection pool for either database (more importantly for database B)? If so, what size? What are you setting UV_THREADPOOL_SIZE to? A bad solution would be to use something like async.queue, a good solution would be to use something like PL/SQL. I'll try to put together a generic example for you... – Dan McGhan Mar 27 '18 at 17:40
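For reference, a minimal sketch of the concurrency-limited mapping that jfriend00 suggests, assuming Bluebird is installed and routing the Database B lookups through a connection pool; getClientFromB() and getClientsForBills() are hypothetical helpers, and the pool size, concurrency limit, and query are illustrative placeholders:

const Promise = require('bluebird');         // provides Promise.map with a concurrency option
const oracledb = require('oracledb');
const configB = require('./dbConfigB.js');   // hypothetical config for Database B

// Hypothetical Database B lookup; the table and column names are placeholders.
async function getClientFromB(pool, commonKey) {
  const conn = await pool.getConnection();

  try {
    const result = await conn.execute(
      'select * from clients where common_key = :k',
      [commonKey]
    );
    return result.rows[0];
  } finally {
    await conn.close();   // release the connection back to the pool
  }
}

async function getClientsForBills(bills) {
  // The pool caps how many connections are open; excess requests queue up.
  const pool = await oracledb.createPool(Object.assign({}, configB, { poolMax: 10 }));

  try {
    // Never start more than 100 Database B lookups at the same time.
    return await Promise.map(
      bills,
      bill => getClientFromB(pool, bill.commonKey),
      { concurrency: 100 }
    );
  } finally {
    await pool.close();
  }
}

Capping concurrency this way keeps the load on Database B bounded, while the pool queues any requests beyond its size instead of failing them.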

1 Answer


Here's an example of how you can do this by leveraging the new executeMany() method in node-oracledb v2.2.0 (recently released) and a global temporary table to minimize round trips.

Given these objects:

-- Imagine this is the table you want to select from based on the common keys
create table t (
  common_key number,
  info       varchar2(50)
);

-- Add 10,000 rows with keys 1-10,000 and random data for info
insert into t (common_key, info)
select rownum,
  dbms_random.string('p', 50)
from dual
connect by rownum <= 10000;

commit;

-- Create a temp table
create global temporary table temp_t (
  common_key number not null
)
on commit delete rows;

The following should work:

const oracledb = require('oracledb');
const config = require('./dbConfig.js');
const startKey = 1000;
const length = 2000;

// Uses a promise to simulate async work.
function getListFromDatabaseA() {
  return new Promise((resolve) => {
    const list = [];
    const count = length - startKey;

    for (let x = 0; x < count; x += 1) {
      list.push(startKey + x);
    }

    resolve(list);
  });
}

// The list returned from A likely isn't in the right format for executeMany.
function reformatAsBinds(list) {
  const binds = [];

  for (let x = 0; x < list.length; x += 1) {
    binds.push({
      key: list[x]
    });
  }

  return binds;
}

async function runTest() {
  let conn;

  try {
    const listFromA = await getListFromDatabaseA();

    const binds = reformatAsBinds(listFromA);

    conn = await oracledb.getConnection(config);

    // Send the keys to the temp table with executeMany for a single round trip.
    // The data in the temp table will only be visible to this session and will
    // be deleted automatically at the end of the transaction.
    await conn.executeMany('insert into temp_t (common_key) values (:key)', binds);

    // Now get your common_key and info based on the common keys in the temp table.
    let result = await conn.execute(
     `select common_key, info 
      from t 
      where common_key in (
        select common_key
        from temp_t
      )
      order by common_key`
    );

    console.log('Got ' + result.rows.length + ' rows');

    console.log('Showing the first 10 rows');

    for (let x = 0; x < 10; x += 1) {
      console.log(result.rows[x]);
    }
  } catch (err) {
    console.error(err);
  } finally {
    if (conn) {
      try {
        await conn.close();
      } catch (err) {
        console.error(err);
      }
    }
  }
}

runTest();

After I posted the solution above, I thought I should provide an alternative that keeps the keys going from Node.js to the DB "in memory". You'll have to run tests and review the explain plans to see which option is best for you (it will depend on a number of factors).

Given these objects:

-- This is the same as before
create table t (
  common_key number,
  info       varchar2(50)
);

-- Add 10,000 rows with keys 1-10,000 and random data for info
insert into t (common_key, info)
select rownum,
  dbms_random.string('p', 50)
from dual
connect by rownum <= 10000;

-- But here, we use a nested table instead of a temp table
create or replace type number_ntt as table of number;

This should work:

const oracledb = require('oracledb');
const config = require('./dbConfig.js');
const startKey = 1000;
const length = 2000;

// Uses a promise to simulate async work.
function getListFromDatabaseA() {
  return new Promise((resolve) => {
    const list = [];
    const count = length - startKey;

    for (let x = 0; x < count; x += 1) {
      list.push(startKey + x);
    }

    resolve(list);
  });
}

async function runTest() {
  let conn;

  try {
    const listFromA = await getListFromDatabaseA();

    const binds = {
      keys: {
        type: oracledb.NUMBER,
        dir: oracledb.BIND_IN,
        val: listFromA
      },
      rs: {
        type: oracledb.CURSOR,
        dir: oracledb.BIND_OUT
      }
    };

    conn = await oracledb.getConnection(config);

    // Now get your common_key and info based on the keys bound into the PL/SQL block.
    let result = await conn.execute(
     `declare

        type number_aat is table of number index by pls_integer;

        l_keys    number_aat;
        l_key_tbl number_ntt := number_ntt();

      begin

        -- Unfortunately, we have to bind in with this data type, but
        -- it can't be used as a table...
        l_keys := :keys;

        -- So we'll transfer the data to another array type that can. This
        -- variable's type was created at the schema level so that it could
        -- be seen by the SQL engine.
        for x in 1 .. l_keys.count
        loop
          l_key_tbl.extend();
          l_key_tbl(l_key_tbl.count) := l_keys(x);
        end loop;

        open :rs for
          select common_key, info 
          from t 
          where common_key in (
            select column_value
            from table(l_key_tbl)
          )
          order by common_key;

      end;`,
      binds
    );

    const resultSet = result.outBinds.rs;

    console.log('Showing the first 10 rows');

    for (let x = 0; x < 10; x += 1) {
      let row = await resultSet.getRow();
      console.log(row);
    }

    // Close the result set to free its resources once we're done with it.
    await resultSet.close();
  } catch (err) {
    console.error(err);
  } finally {
    if (conn) {
      try {
        await conn.close();
      } catch (err) {
        console.error(err);
      }
    }
  }
}

runTest();

The formatting of the binds is different here (a bit simpler). Also, because this executes PL/SQL, an out bind of type cursor/result set is needed to get the rows back.
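If you need more than the first 10 rows, the result set can be drained in batches with getRows() and then closed; here is a small sketch (the batch size and the handleRow callback are placeholder choices):

// Drain an open node-oracledb ResultSet in batches, then close it.
// handleRow is a placeholder for whatever you do with each row.
async function drainResultSet(resultSet, batchSize, handleRow) {
  let rows;

  do {
    rows = await resultSet.getRows(batchSize);  // fetch up to batchSize rows per call
    rows.forEach(handleRow);
  } while (rows.length === batchSize);

  await resultSet.close();
}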

See this post regarding cardinality with nested tables: http://www.oracle-developer.net/display.php?id=427

If you try both, please leave some feedback about which worked better.

Dan McGhan