How about this code?
#include <cstdio>
#include <cinttypes>

int main()
{
    uint64_t tsc = 0xdeaddeadc0dec0de;

    // Read the two 32-bit halves through pointer arithmetic.
    // On a little-endian machine the second word is the high half.
    uint32_t MSB = *((uint32_t*)&tsc + 1);
    uint32_t LSB = *((uint32_t*)&tsc);
    std::printf("low %" PRIx32 " high %" PRIx32 "\n", LSB, MSB);

    // Reassemble the halves in their original order.
    uint64_t MLSB = 0;
    *((uint32_t*)&MLSB) = LSB;
    *((uint32_t*)&MLSB + 1) = MSB;
    std::printf("highlow %" PRIx64 "\n", MLSB);

    // Reassemble the halves swapped.
    uint64_t LMSB = 0;
    *((uint32_t*)&LMSB + 1) = LSB;
    *((uint32_t*)&LMSB) = MSB;
    std::printf("lowhigh %" PRIx64 "\n", LMSB);
}
My questions: what could go wrong in this code? Does it depend on the machine's endianness? Probably yes: as written it assumes a little-endian layout, so on a big-endian machine the LSB and MSB would swap places. Alternatively, you could make it automatic by using system macros that report the endianness and decide which word is high and which is low (see the first sketch below). And lastly, does this run faster than shifting and type casting, and is it worth doing (see the second sketch below)?
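On the endianness question, here is a minimal sketch of the macro-based approach. It assumes a GCC/Clang-style compiler that predefines __BYTE_ORDER__ (MSVC does not; in C++20 you could use std::endian from <bit> instead), and it goes through memcpy rather than pointer casts, which also avoids the strict-aliasing problem of the pointer version:

#include <cstdio>
#include <cinttypes>
#include <cstring>

int main()
{
    uint64_t tsc = 0xdeaddeadc0dec0de;

    // memcpy is the well-defined way to type-pun; compilers
    // optimize it away, so no actual copy happens at runtime.
    uint32_t words[2];
    std::memcpy(words, &tsc, sizeof tsc);

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    uint32_t MSB = words[0];   // big-endian: high half comes first
    uint32_t LSB = words[1];
#else
    uint32_t MSB = words[1];   // little-endian: high half comes second
    uint32_t LSB = words[0];
#endif
    std::printf("low %" PRIx32 " high %" PRIx32 "\n", LSB, MSB);
}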
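For comparison, the shift-and-cast version. Shifts operate on the value rather than on the bytes in memory, so this is endianness-independent by construction, and on typical compilers both versions reduce to the same couple of register moves, so the pointer punning usually buys no speed at all:

#include <cstdio>
#include <cinttypes>

int main()
{
    uint64_t tsc = 0xdeaddeadc0dec0de;

    // Split into halves: works the same on any endianness.
    uint32_t MSB = (uint32_t)(tsc >> 32);
    uint32_t LSB = (uint32_t)tsc;
    std::printf("low %" PRIx32 " high %" PRIx32 "\n", LSB, MSB);

    // Reassemble in both orders.
    uint64_t MLSB = ((uint64_t)MSB << 32) | LSB;
    uint64_t LMSB = ((uint64_t)LSB << 32) | MSB;
    std::printf("highlow %" PRIx64 "\n", MLSB);
    std::printf("lowhigh %" PRIx64 "\n", LMSB);
}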