If all variables are uint16_t, why do I need to explicitly cast the result of the subtraction to get the correct result?
Minimal test to reproduce:
#include <stdio.h>
#include <stdint.h>

static void
delay_until_not_working(uint16_t start_time, uint16_t delay_time) {
    uint16_t curr_time = start_time;

    /* Endless loop - even though all variables are unsigned */
    while ((curr_time - start_time) < delay_time) {
        printf("Start: %5d, Curr: %5d, delay: %5d\r\n", (int)start_time, (int)curr_time, (int)delay_time);
        curr_time++;
    }
}

static void
delay_until_working(uint16_t start_time, uint16_t delay_time) {
    uint16_t curr_time = start_time;

    /* Added cast to uint16_t - loop terminates as expected */
    while ((uint16_t)(curr_time - start_time) < delay_time) {
        printf("Start: %5d, Curr: %5d, delay: %5d\r\n", (int)start_time, (int)curr_time, (int)delay_time);
        curr_time++;
    }
}

int
main() {
    printf("Delay until working..\r\n");
    delay_until_working(65530, 200);

    printf("Delay until endless loop..\r\n");
    delay_until_not_working(65530, 200);

    return 0;
}
I have a 16-bit free-running timer that I use to generate microsecond delays, and I have observed that with 16-bit unsigned variables I need the explicit cast, otherwise the while loop never ends. The same code works fine when the variables are uint32_t. The architecture is Cortex-M, compiled with GCC 10.3.1.
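To illustrate what I suspect is happening (assuming int is 32 bits on this Cortex-M target; this is a reduced sketch, not my real timer code):

#include <stdio.h>
#include <stdint.h>

int
main() {
    uint16_t start_time = 65530; /* value captured before the timer wrapped */
    uint16_t curr_time = 4;      /* timer has already wrapped around */

    /* My guess: both operands are promoted to int before the subtraction,
     * so the result is a negative int instead of wrapping modulo 65536
     * the way pure uint16_t arithmetic would. */
    printf("promoted:  %d\n", curr_time - start_time);                        /* prints -65526 */
    printf("truncated: %u\n", (unsigned)(uint16_t)(curr_time - start_time));  /* prints 10 */

    return 0;
}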
Question: Does the subtraction return its result as type int when sizeof(operands) < sizeof(int)?