Hello fellow JS junkies. Can someone please explain the difference between the following two ways of evaluating the truthiness of a variable?
var q1Var1 = "hello",
    q1Var2 = 0,
    q1Var3 = true,
    q1Var4 = "false",
    q1Var5 = -1,
    q1Var6 = undefined,
    q1Var7 = null,
    q1Var8;
// What is the difference between this block of code:
console.log( !q1Var1 );
console.log( !q1Var2 );
console.log( !q1Var3 );
console.log( !q1Var4 );
console.log( !q1Var5 );
console.log( !q1Var6 );
console.log( !q1Var7 );
console.log( !q1Var8 );
// and this block?
if( q1Var1 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var2 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var3 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var4 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var5 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var6 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var7 === true ){ console.log( "true" ); } else { console.log( "false" ); }
if( q1Var8 === true ){ console.log( "true" ); } else { console.log( "false" ); }
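For reference, this is the output I see when I run the two blocks in my browser console (I'm assuming Node would print the same):

// first block
false   // !q1Var1
true    // !q1Var2
false   // !q1Var3
false   // !q1Var4
false   // !q1Var5
true    // !q1Var6
true    // !q1Var7
true    // !q1Var8

// second block: logs "false" for every variable except q1Var3, which logs "true"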
I was expecting the second block to simply be the inverse of the first (in other words, the values would just switch). However, it looks like only the variable set to the boolean true (q1Var3, not the string "false") actually logs "true". Why is this? What is the fundamental difference between the two evaluations:
!q1Var1
vs.
q1Var1 === true
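To make the comparison concrete, here is a minimal side-by-side using q1Var3 (true) and q1Var5 (-1); the comments show the values I get in the console:

console.log( !q1Var3 );         // false
console.log( q1Var3 === true ); // true
console.log( !q1Var5 );         // false
console.log( q1Var5 === true ); // false

So the two expressions agree for q1Var3, but they disagree for q1Var5, even though -1 seemed "truthy" to me.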