
I want to count the number of bits needed to represent an integer in JavaScript (see the assertions below). I have tried several variations, but none are getting the desired result.

assert(countIntegerBits(4), 3) // 100
assert(countIntegerBits(8), 4) // 1000
assert(countIntegerBits(20), 5) // 10100
assert(countIntegerBits(100), 7) // 1100100

// https://stackoverflow.com/questions/43122082/efficiently-count-the-number-of-bits-in-an-integer-in-javascript
function countIntegerBits(integer) {
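  // note: this loop ends up counting the set bits (e.g. 4 -> 1), not the bit length the assertions expect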
  var length = 0

  while (integer = Math.floor(integer)) {
    if (integer & 1) {
      length++
    }

    integer /= 2
  }

  return length
}

function countIntegerBits(integer) {
  // var length = 0
  // while (integer !== 0) {
  //   length += countIntegerBits32(integer | 0)
  //   integer /= 0x100000000
  // }
  // return length
  //
  // or perhaps this:
  // https://gist.github.com/everget/320499f197bc27901b90847bf9159164#counting-bits-in-a-32-bit-integer
}

function countIntegerBits32(integer) {
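  // the classic SWAR trick: this counts how many bits are set in a 32-bit integer (Hamming weight), not the bit length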
  integer = integer - ((integer >> 1) & 0x55555555)
  integer = (integer & 0x33333333) + ((integer >> 2) & 0x33333333)
  return ((integer + (integer >> 4) & 0xF0F0F0F) * 0x1010101) >> 24
}

function countStringBits(string) {
  // looks like counting the UTF-8 bytes like this and multiplying by 8 would be good enough
  // https://codereview.stackexchange.com/questions/37512/count-byte-length-of-string
  var length = 0;
  for (var i = 0; i < string.length; i++) {
    var c = string.charCodeAt(i);
    length += c < (1 <<  7) ? 1 :
               c < (1 << 11) ? 2 :
               c < (1 << 16) ? 3 :
               c < (1 << 21) ? 4 :
               c < (1 << 26) ? 5 :
               c < (1 << 31) ? 6 : Number.NaN
  }
  return length;
}
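
Alternatively, if TextEncoder is available (modern browsers and recent Node), the UTF-8 byte count can be had directly and multiplied by 8. Just a sketch, and countStringBitsUtf8 is only a name I'm using here:

function countStringBitsUtf8(string) {
  // TextEncoder always encodes to UTF-8, so .length is the byte count
  var bytes = new TextEncoder().encode(string)
  return bytes.length * 8
}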

function countFloatBits(float) {
  // looks too complicated for an SO question
  // http://binary-system.base-conversion.ro/real-number-converted-from-decimal-system-to-32bit-single-precision-IEEE754-binary-floating-point.php?decimal_number_base_ten=1.23&sign=0&exponent=01111111&mantissa=00111010111000010100011
}
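
Just to park the idea (not what I'm asking about here): every JavaScript number is a 64-bit IEEE-754 double, so the raw bits could be read out with a DataView. A rough sketch:

function floatToBitString(float) {
  // write the number as a 64-bit double, then read the 8 bytes back out
  var view = new DataView(new ArrayBuffer(8))
  view.setFloat64(0, float)
  var bits = ''
  for (var i = 0; i < 8; i++) {
    bits += view.getUint8(i).toString(2).padStart(8, '0')
  }
  return bits // 64 chars: 1 sign bit, 11 exponent bits, 52 mantissa bits
}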

function assert(a, b) {
  if (a !== b) throw new Error(a + ' != ' + b)
}

What I want to avoid is this convert-to-string hack:

var length = integer.toString(2).split('').length

The only other thing I can think of is to check whether each bit is set, until I get to the first 1, and start counting from there.

assert(countIntegerBits(4), 3) // 100
assert(countIntegerBits(8), 4) // 1000
assert(countIntegerBits(20), 5) // 10100
assert(countIntegerBits(100), 7) // 1100100

function countIntegerBits(integer) {
  var i = 0
  while (true) {
    if (integer & (1 << i)) {
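      // hmm: this finds the lowest set bit, so 31 - i is not the bit length (4 gives 29, not 3)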
      return 31 - i
    }

    i++
  }
}

function assert(a, b) {
  if (a !== b) throw new Error(a + ' != ' + b)
}

But that doesn't seem quite right, because I'm not sure whether all integers are represented as 32 bits under the hood; for example, (4).toString(2) gives "100", not 00000000000000000000000000000100.
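
For what it's worth, here is a sketch of that idea if I just assume everything fits in 32 bits, scanning down from bit 31 instead of up from bit 0 (so the first set bit found is the highest one):

function countIntegerBits(integer) {
  for (var i = 31; i >= 0; i--) {
    if (integer & (1 << i)) {
      return i + 1 // highest set bit is bit i, so i + 1 bits are needed
    }
  }
  return 0 // no bits set, integer is 0
}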

Above I have also explored how to count the length of a string in bits, and the same for floats, but while strings seem straightforward if it's UTF-8 encoding, floats look like a whole big thing, so my question is only about integers up to the max that JavaScript supports. For all practical purposes at the moment, I will only be considering integers up to about a billion, so it doesn't need to account for 123e456 BigInts or anything, just numbers up to a few billion, or up to the basic 32-bit integer max in JavaScript.

Lance
  • why not just take the base-2 log and round up? – Pointy Feb 16 '19 at 23:09
  • I don't know how to do that, don't have a CS degree or background in math. – Lance Feb 16 '19 at 23:10
  • `Math.log(n)/Math.log(2)` – Pointy Feb 16 '19 at 23:10
  • So `1 + Math.floor(Math.log(integer) / Math.log(2))`. That passes all the assertions above, yay! – Lance Feb 16 '19 at 23:12
  • `Math.log` might potentially introduce some rounding problems. I would use 32-bit solution. 1 billion is in range of 32 bits. – zch Feb 16 '19 at 23:16
  • Wondering if `Math.log` is _not_ a 32-bit solution, and an example of a rounding problem in this situation. – Lance Feb 16 '19 at 23:16
  • At my machine `Math.log(1<<29)/Math.log(2)` results in `29.000000000000004`. The algebraic result is `29`. So if you round down you will get the same result, but if you round up it will be wrong. I also think that it is not exactly specified how `log` rounds, so it might differ by environment. – zch Feb 16 '19 at 23:23

1 Answer


There's a relationship between natural log (well, log to any base) and log to another base. For getting the base-2 log:

const log2 = n => Math.log(n) / Math.log(2);

You'd want to round down after adding 1:

const bits = n => Math.floor(log2(n) + 1);
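
To sanity-check against the assertions from the question, reusing its assert helper (the asker confirmed in the comments that this passes; zch's caveat about log rounding on exact powers of two still applies, and Math.log2(n) is also available in modern engines):

assert(bits(4), 3)    // 100
assert(bits(8), 4)    // 1000
assert(bits(20), 5)   // 10100
assert(bits(100), 7)  // 1100100
// edge case: bits(0) is -Infinity, since log2(0) is -Infinity
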
Pointy