I was looking for a 64-bit/32-bit division algorithm preferably with a small code size for a 32-bit x86 machine. I found this surprisingly simple one, but couldn't see how it works.
The algorithm is as follows.
- We are calculating `x / y`, where `x` is 64-bit and `y` is 32-bit. Both are unsigned. `x.l` is the lower 32 bits of `x`, and `x.h` is the higher 32 bits of `x`, assuming a little-endian machine.

  h   <- x.h / y
  x.h <- x.h % y
  x.l <- x / y, (x.h <- x % y)   ; the hardware 64-bit/32-bit division only works when the quotient fits in 32 bits, which seems to be true in this case
  x.h <- h
  return x
My math background is too weak for me to easily see why this algorithm works. Could you help me understand it?
This is a test program I wrote to see if it works.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * A 64-bit value overlaid with its two 32-bit halves.
 * On a little-endian machine, l aliases the low 32 bits of q and
 * h aliases the high 32 bits (the layout the algorithm assumes).
 * Reading a union member other than the one last written is
 * well-defined type punning in C (C11 6.5.2.3, footnote 95).
 */
typedef union {
uint64_t q;  /* the whole 64-bit value */
struct {     /* anonymous struct (C11): members accessed as x.l / x.h */
uint32_t l;  /* low 32 bits (first in memory on little-endian) */
uint32_t h;  /* high 32 bits */
};
} uint64_u;
/*
 * Unsigned 64-bit / 32-bit division built on the x86 `div` instruction.
 *
 * `div r32` divides the 64-bit value EDX:EAX by a 32-bit operand, but it
 * faults unless the quotient fits in 32 bits.  Why this function is safe:
 * write x = x.h * 2^32 + x.l.  First compute
 *   h   = x.h / y   (this is the high 32 bits of the final quotient)
 *   x.h = x.h % y   (so now x.h < y)
 * After the reduction, the quotient of (x.h:x.l) / y is
 * (x.h * 2^32 + x.l) / y < 2^32 because x.h < y, so the hardware divide
 * cannot overflow.  Its quotient is the low word of the answer; `h` is
 * then restored as the high word.  (The remainder that `div` leaves in
 * EDX, i.e. in x.h, is overwritten and discarded.)
 *
 * Requires y != 0 (division by zero faults, as with `/`).
 */
uint64_t div_(uint64_t _x, uint32_t y) {
uint64_u x = {_x};      /* view the dividend as {l, h} halves */
uint32_t h = x.h / y;   /* high word of the final quotient */
x.h = x.h % y;          /* reduce so the hardware divide cannot overflow */
__asm__ (
"div %2":               /* EDX:EAX / y -> quotient in EAX, remainder in EDX */
"+a" (x.l),             /* "+a": x.l is in EAX, both read and written */
"+d" (x.h),             /* "+d": x.h is in EDX, both read and written */
"r" (y)                 /* divisor in any other general-purpose register */
);
x.h = h;                /* restore the high word of the quotient */
return x.q;
}
/*
 * Randomized self-test: compare div_() against the compiler's native
 * 64/32 division over a million cases; report and abort on mismatch.
 */
int main() {
for (int i = 0; i < 1000000; ++i) {
/* Build a full 64-bit dividend.  rand() typically yields only 31
 * random bits (RAND_MAX is commonly 2^31 - 1), so a single
 * `rand() << 32 | rand()` would never set bits 31 or 63.  XOR three
 * shifted draws so every one of the 64 bits can vary. */
uint64_t x = (uint64_t)rand() << 33 ^ (uint64_t)rand() << 31 ^ (uint64_t)rand();
/* Nonzero divisor in [1, RAND_MAX]. */
uint32_t y = rand() % RAND_MAX + 1;
uint64_t r0 = x / y;
uint64_t r1 = div_(x, y);
if (r0 != r1) {
/* Print the failing case so the bug is reproducible. */
fprintf(stderr, "mismatch: %llu / %u: expected %llu, got %llu\n",
(unsigned long long)x, y,
(unsigned long long)r0, (unsigned long long)r1);
abort();
}
}
return 0;
}