I have a fairly simple sensor which outputs a measured distance, with a number of adjustable parameters which determine how the sensor takes said measurement.
My question is: what is the most programmatically efficient method for tuning the settings so that the sensor's reading aligns with a known calibration distance? Each setting has a known minimum, maximum and (minimum) step size.
This is my first attempt:
def setParameters(nl, nd, v1, v2, v3, ln, lto, sr, vo, a, do):
    """Write one complete set of sensor parameters into the GUI, then apply them."""
    # Pair each GUI input box with the value destined for it, in the same
    # order the original sequence of SetText calls used.
    writes = (
        (nLightBox, nl),
        (nDataBox, nd),
        (vtx1Box, v1),
        (vtx2Box, v2),
        (vtx3Box, v3),
        (ledNumBox, ln),
        (ltOffsetBox, lto),
        (srBox, sr),
        (vofsBox, vo),
        (aBox, a),
        (dofsBox, do),
    )
    for box, value in writes:
        box.SetText(value)
    # Commit all values to the sensor in one go.
    setButton.Click()
from itertools import product

# Best (closest-to-target) reading seen so far, and the settings that
# produced it; seeded with the factory defaults.
closestMeasure = math.inf
closestSettings = {
    'nLight': NLIGHT_DEFAULT,
    'nData': NDATA_DEFAULT,
    'vtx1': VTX1_DEFAULT,
    'vtx2': VTX2_DEFAULT,
    'vtx3': VTX3_DEFAULT,
    'ledNum': LED_NUM_DEFAULT,
    'ltOffset': LT_OFFSET_DEFAULT,
    'sr': SR_DEFAULT,
    'vofs': VOFS_DEFAULT,
    'alpha': ALPHA_DEFAULT,
    'dofs': DOFS_DEFAULT,
}

try:
    print("Adjusting parameters...")
    # Coarse-to-fine sweep: scan the full grid with every parameter's step
    # size multiplied by 1000, then 100, 10 and finally 1 (the native step).
    # BUG FIX: the multiplier was previously named 'i', and the two-sample
    # averaging loop below ALSO used 'i', permanently clobbering the
    # multiplier to 2 after the very first measurement — every later range
    # was built with step*2 instead of the intended coarse step.
    for stepScale in (1000, 100, 10, 1):
        grid = product(
            arange(DOFS_MIN, DOFS_MAX + 0.01, DOFS_STEP * stepScale),
            range(VOFS_MIN, VOFS_MAX + 1, VOFS_STEP * stepScale),
            range(LT_OFFSET_MIN, LT_OFFSET_MAX + 1, LT_OFFSET_STEP * stepScale),
            arange(SR_MIN, SR_MAX + 0.01, SR_STEP * stepScale),
            arange(ALPHA_MIN, ALPHA_MAX + 0.01, ALPHA_STEP * stepScale),
            range(NLIGHT_MIN, NLIGHT_MAX + 1, NLIGHT_STEP * stepScale),
            range(NDATA_MIN, NDATA_MAX + 1, NDATA_STEP * stepScale),
            range(VTX1_MIN, VTX1_MAX + 1, VTX1_STEP * stepScale),
            range(VTX2_MIN, VTX2_MAX + 1, VTX2_STEP * stepScale),
            range(VTX3_MIN, VTX3_MAX + 1, VTX3_STEP * stepScale),
            range(LED_NUM_MIN, LED_NUM_MAX + 1, LED_NUM_STEP * stepScale),
        )
        # product() advances the rightmost iterable fastest, which matches
        # the original eleven nested for-loops exactly.
        for do, vo, lto, sr, a, nl, nd, v1, v2, v3, ln in grid:
            setParameters(nl, nd, v1, v2, v3, ln, lto, sr, vo, a, do)
            time.sleep(0.1)  # give the sensor time to settle before reading

            # Average a couple of readings, skipping blank ones.
            # NOTE(review): range(1, 3) yields only TWO samples — confirm
            # whether three were intended.
            sumMeasure = 0.0
            samples = 0
            for _ in range(1, 3):
                reading = avgDistanceBox.TextBlock()
                if reading:
                    # Strip thousands separators and map the GUI's infinity
                    # glyph onto something float() understands.
                    sumMeasure += float(reading.replace(',', '').replace('∞', 'inf'))
                    samples += 1
                time.sleep(0.05)

            if samples > 0:
                # Scale the averaged reading; presumably mm -> m — confirm units.
                measured = (sumMeasure / samples) * 0.001
                # Keep the settings whose reading lands nearest the known
                # calibration distance.
                if abs(measured - distance) < abs(closestMeasure - distance):
                    closestMeasure = measured
                    print("Reading at {} meters, target is {} meters".format(closestMeasure, distance))
                    closestSettings = {
                        'nLight': nl,
                        'nData': nd,
                        'vtx1': v1,
                        'vtx2': v2,
                        'vtx3': v3,
                        'ledNum': ln,
                        'ltOffset': lto,
                        'sr': sr,
                        'vofs': vo,
                        'alpha': a,
                        'dofs': do,
                    }
except Exception as err:  # was a bare except:, which also trapped KeyboardInterrupt
    print("Error during parameter adjustment: ", type(err))
    raise

print(closestMeasure)
print(closestSettings)
As you probably can see this is a horribly inefficient way to tackle this problem. Any advice would be very much appreciated!
Edit: Added more of the relevant code
Edit 2: Here is how the distance is calculated
Where Vout1 and Vout2 are sensor outputs, C is the speed of light, and T0 is equal to ledNum
Here are what the values are, and the minimums, maximums and minimum step values
# Amount of sample frames to capture
NUMFRAMES_DEFAULT = 10
NUMFRAMES_MIN = 1
NUMFRAMES_MAX = 10000
NUMFRAMES_STEP = 1
# Number of times to emit light in a frame
NLIGHT_DEFAULT = 100
NLIGHT_MIN = 0
NLIGHT_MAX = 65535
NLIGHT_STEP = 1
# Number of times to read data in a frame
NDATA_DEFAULT = 100
NDATA_MIN = 5
NDATA_MAX = 250
NDATA_STEP = 1
# VTX1 Pulse Width (Should equal LED_NUM)
VTX1_DEFAULT = 40
VTX1_MIN = 20
VTX1_MAX = 5100
VTX1_STEP = 20
# VTX2 Pulse Width (Should equal LED_NUM)
VTX2_DEFAULT = 40
VTX2_MIN = 20
VTX2_MAX = 5100
VTX2_STEP = 20
# VTX3 High Period
VTX3_DEFAULT = 920
VTX3_MIN = 20
VTX3_MAX = 1310700
VTX3_STEP = 20
# LED Emission Pulse Width
LED_NUM_DEFAULT = 40
LED_NUM_MIN = 20
LED_NUM_MAX = 5100
LED_NUM_STEP = 20
# LED Emission Delay
LT_OFFSET_DEFAULT = 20
LT_OFFSET_MIN = 0
LT_OFFSET_MAX = 300
LT_OFFSET_STEP = 20
# Sensitivity Ratio (dimensionless)
SR_DEFAULT = 1.00
SR_MIN = 0.00
SR_MAX = 2.00
SR_STEP = 0.01
# Voltage Offset
VOFS_DEFAULT = 0
VOFS_MIN = 0
VOFS_MAX = 1000
VOFS_STEP = 1
# Slope (dimensionless gain applied to the measurement)
ALPHA_DEFAULT = 1.00
ALPHA_MIN = 0.00
ALPHA_MAX = 5.00
ALPHA_STEP = 0.01
# Distance Offset (units not stated here — presumably same units as the
# raw sensor reading; confirm against the sensor datasheet)
DOFS_DEFAULT = 0.00
DOFS_MIN = -100.00
DOFS_MAX = 100.00
DOFS_STEP = 0.01
# End definition