
I have a fairly simple sensor that outputs a measured distance, along with a number of adjustable parameters that determine how the sensor takes that measurement.

My question is: what is the most programmatically efficient method for zeroing in the settings so that the sensor's reading aligns with a known calibration distance? Each setting has a known minimum, maximum and (minimum) step size.

This is my first attempt:

import math
import sys
import time
from numpy import arange

# Write each value into its GUI text box, then click Set to push the settings to the sensor
def setParameters(nl, nd, v1, v2, v3, ln, lto, sr, vo, a, do):
    nLightBox.SetText(nl)
    nDataBox.SetText(nd)
    vtx1Box.SetText(v1)
    vtx2Box.SetText(v2)
    vtx3Box.SetText(v3)
    ledNumBox.SetText(ln)
    ltOffsetBox.SetText(lto)
    srBox.SetText(sr)
    vofsBox.SetText(vo)
    aBox.SetText(a)
    dofsBox.SetText(do)
    setButton.Click()

closestMeasure = math.inf
closestSettings = {
    'nLight': NLIGHT_DEFAULT,
    'nData': NDATA_DEFAULT,
    'vtx1': VTX1_DEFAULT,
    'vtx2': VTX2_DEFAULT,
    'vtx3': VTX3_DEFAULT,
    'ledNum': LED_NUM_DEFAULT,
    'ltOffset': LT_OFFSET_DEFAULT,
    'sr': SR_DEFAULT,
    'vofs': VOFS_DEFAULT,
    'alpha': ALPHA_DEFAULT,
    'dofs': DOFS_DEFAULT
}

try:
    print("Adjusting parameters...")
    for i in [1000, 100, 10, 1]:  # step-size multiplier: sweep coarsely first, then progressively finer
        for do in arange(DOFS_MIN, DOFS_MAX+0.01, DOFS_STEP*i):
            for vo in range(VOFS_MIN, VOFS_MAX+1, VOFS_STEP*i):
                for lto in range(LT_OFFSET_MIN, LT_OFFSET_MAX+1, LT_OFFSET_STEP*i):
                    for sr in arange(SR_MIN, SR_MAX+0.01, SR_STEP*i):
                        for a in arange(ALPHA_MIN, ALPHA_MAX+0.01, ALPHA_STEP*i):
                            for nl in range(NLIGHT_MIN, NLIGHT_MAX+1, NLIGHT_STEP*i):
                                for nd in range(NDATA_MIN, NDATA_MAX+1, NDATA_STEP*i):
                                    for v1 in range(VTX1_MIN, VTX1_MAX+1, VTX1_STEP*i):
                                        for v2 in range(VTX2_MIN, VTX2_MAX+1, VTX2_STEP*i):
                                            for v3 in range(VTX3_MIN, VTX3_MAX+1, VTX3_STEP*i):
                                                for ln in range(LED_NUM_MIN, LED_NUM_MAX+1, LED_NUM_STEP*i):
                                                    setParameters(nl, nd, v1, v2, v3, ln, lto, sr, vo, a, do)
                                                    time.sleep(0.1)
                                                    sumMeasure = 0.00
                                                    samples = 0
                                                    for _ in range(2):  # take two samples; a throwaway name avoids clobbering the step multiplier i
                                                        if len(avgDistanceBox.TextBlock()) != 0:
                                                            sumMeasure += float(avgDistanceBox.TextBlock().replace(',','').replace('∞','inf'))
                                                            samples += 1
                                                        time.sleep(0.05)
                                                    if samples > 0:
                                                        measured = (sumMeasure/samples)*0.001
                                                        if abs(measured - distance) < abs(closestMeasure - distance):
                                                            closestMeasure = measured
                                                            print("Reading at {} meters, target is {} meters".format(closestMeasure, distance))
                                                            closestSettings = {
                                                                'nLight': nl,
                                                                'nData': nd,
                                                                'vtx1': v1,
                                                                'vtx2': v2,
                                                                'vtx3': v3,
                                                                'ledNum': ln,
                                                                'ltOffset': lto,
                                                                'sr': sr,
                                                                'vofs': vo,
                                                                'alpha': a,
                                                                'dofs': do
                                                            }
except Exception:
    print("Error during parameter adjustment: ", sys.exc_info()[0])
    raise

print(closestMeasure)
print(closestSettings)

As you can probably see, this is a horribly inefficient way to tackle this problem. Any advice would be very much appreciated!
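For readability only, the same exhaustive sweep can be flattened with itertools.product; this is just a sketch of the loop structure above (using the constants listed under Edit 2) and visits exactly the same combinations, so it is no faster.

# Readability-only sketch: the same sweep, flattened with itertools.product
from itertools import product
from numpy import arange

for i in [1000, 100, 10, 1]:  # coarse-to-fine step multiplier, as above
    combos = product(
        arange(DOFS_MIN, DOFS_MAX + 0.01, DOFS_STEP * i),
        range(VOFS_MIN, VOFS_MAX + 1, VOFS_STEP * i),
        range(LT_OFFSET_MIN, LT_OFFSET_MAX + 1, LT_OFFSET_STEP * i),
        arange(SR_MIN, SR_MAX + 0.01, SR_STEP * i),
        arange(ALPHA_MIN, ALPHA_MAX + 0.01, ALPHA_STEP * i),
        range(NLIGHT_MIN, NLIGHT_MAX + 1, NLIGHT_STEP * i),
        range(NDATA_MIN, NDATA_MAX + 1, NDATA_STEP * i),
        range(VTX1_MIN, VTX1_MAX + 1, VTX1_STEP * i),
        range(VTX2_MIN, VTX2_MAX + 1, VTX2_STEP * i),
        range(VTX3_MIN, VTX3_MAX + 1, VTX3_STEP * i),
        range(LED_NUM_MIN, LED_NUM_MAX + 1, LED_NUM_STEP * i),
    )
    for do, vo, lto, sr, a, nl, nd, v1, v2, v3, ln in combos:
        setParameters(nl, nd, v1, v2, v3, ln, lto, sr, vo, a, do)
        # ...measure and track the closest reading exactly as in the loop above...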

Edit: Added more of the relevant code

Edit 2: Here is how the distance is calculated:

Distance=foo

Where Vout1 and Vout2 are sensor outputs, C is the speed of light, and T0 is equal to ledNum

Here are the default values, along with each parameter's minimum, maximum and minimum step size:

# Amount of sample frames to capture
NUMFRAMES_DEFAULT = 10
NUMFRAMES_MIN = 1
NUMFRAMES_MAX = 10000
NUMFRAMES_STEP = 1

# Number of times to emit light in a frame
NLIGHT_DEFAULT = 100
NLIGHT_MIN = 0
NLIGHT_MAX = 65535
NLIGHT_STEP = 1

# Number of times to read data in a frame
NDATA_DEFAULT = 100
NDATA_MIN = 5
NDATA_MAX = 250
NDATA_STEP = 1

# VTX1 Pulse Width (Should equal LED_NUM)
VTX1_DEFAULT = 40
VTX1_MIN = 20
VTX1_MAX = 5100
VTX1_STEP = 20

# VTX2 Pulse Width (Should equal LED_NUM)
VTX2_DEFAULT = 40
VTX2_MIN = 20
VTX2_MAX = 5100
VTX2_STEP = 20

# VTX3 High Period
VTX3_DEFAULT = 920
VTX3_MIN = 20
VTX3_MAX = 1310700
VTX3_STEP = 20

# LED Emission Pulse Width
LED_NUM_DEFAULT = 40
LED_NUM_MIN = 20
LED_NUM_MAX = 5100
LED_NUM_STEP = 20

# LED Emission Delay
LT_OFFSET_DEFAULT = 20
LT_OFFSET_MIN = 0
LT_OFFSET_MAX = 300
LT_OFFSET_STEP = 20

# Sensitivity Ratio
SR_DEFAULT = 1.00
SR_MIN = 0.00
SR_MAX = 2.00
SR_STEP = 0.01

# Voltage Offset
VOFS_DEFAULT = 0
VOFS_MIN = 0
VOFS_MAX = 1000
VOFS_STEP = 1

# Slope
ALPHA_DEFAULT = 1.00
ALPHA_MIN = 0.00
ALPHA_MAX = 5.00
ALPHA_STEP = 0.01

# Distance Offset
DOFS_DEFAULT = 0.00
DOFS_MIN = -100.00
DOFS_MAX = 100.00
DOFS_STEP = 0.01
# End definition
Ross Swartz
  • You essentially have a multivariable optimization problem. You might want to consider [`scipy.optimize`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html), which lets you set the bounds of each variable, as well as initial guesses, along with various solver algorithms. – Cory Kramer Oct 02 '17 at 12:43
  • To those who flagged as duplicate, that does not answer the spirit of this question. Their nested for loops are a brute-force implementation of an optimization method; restructuring their loops doesn't solve the actual problem. – Cory Kramer Oct 02 '17 at 12:46
  • I actually flagged as "too broad". – mkrieger1 Oct 02 '17 at 12:47
  • @Cory this asked for programmatically efficient, not time or memory efficient. – Nick is tired Oct 02 '17 at 12:47
  • Okay, I misread, so I reopened. – cs95 Oct 02 '17 at 12:49
  • The question can't be answered. The problem isn't really the giant nested for loop (which, admittedly, could be rewritten more readably using `itertools.product`), but the fact that once you've called `setParameters`, you need a way to evaluate the *result* of doing so against the result from previous settings, and we have no idea what `setParameters` actually does or how to evaluate such a result. – chepner Oct 02 '17 at 12:52
  • 2
    If every set of possible parameters is completely independent, then this *is* the most efficient way to check all settings. But the setting almost certainly are *not* independent, we can't really help without more (probably *much* more) information. – chepner Oct 02 '17 at 12:53
  • I have added more information to the question. – Ross Swartz Oct 02 '17 at 13:30
  • Do you know whether the system response is convex, or does it have lots of local minima/maxima? – pjs Oct 02 '17 at 13:52
  • From how the sensor works, the system response is likely to have many local minima/maxima. That isn't something that I have spent time mapping out though. – Ross Swartz Oct 02 '17 at 14:02
  • 1
    @RossSwartz You can try [How approximation search works](https://stackoverflow.com/a/36163847/2521214) which converts your `O(n^m)` problem to `O(log^m(n))` which boost performance a lot but I am afraid still not enough. Another option is to derive the parameters algebraically from your equation exploiting some math/physics identities and properties but for that we would need to now much more about the sensor and environment and also that part is pure physics/math instead of programing so it would be off topic here The best bet is use [calibration](https://stackoverflow.com/q/29166819/2521214) – Spektre Oct 04 '17 at 11:04
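To make the scipy.optimize suggestion from the comments concrete, here is a minimal sketch. It assumes a hypothetical apply_and_measure() helper (not shown in the question) that pushes the candidate settings to the sensor, e.g. via setParameters(), waits, and returns the averaged reading in metres; it only tunes three of the parameters, and snap() rounds each candidate to the parameter's step size. Given the local minima mentioned above, a derivative-free method such as Powell with several restarts (or a global method like scipy.optimize.differential_evolution) may be needed.

# Sketch: bounded, derivative-free search over a subset of the parameters.
# apply_and_measure() is hypothetical; `distance` is the known calibration distance.
import numpy as np
from scipy.optimize import minimize

PARAMS = [                       # (name, min, max, step)
    ('dofs',  DOFS_MIN,  DOFS_MAX,  DOFS_STEP),
    ('alpha', ALPHA_MIN, ALPHA_MAX, ALPHA_STEP),
    ('sr',    SR_MIN,    SR_MAX,    SR_STEP),
]

def snap(x):
    # Clamp each value to its bounds and round it to the parameter's step size
    out = []
    for v, (_, lo, hi, step) in zip(x, PARAMS):
        v = min(max(v, lo), hi)
        out.append(round(v / step) * step)
    return out

def objective(x):
    do, a, sr = snap(x)
    reading = apply_and_measure(dofs=do, alpha=a, sr=sr)  # hypothetical helper
    return abs(reading - distance)

x0 = [DOFS_DEFAULT, ALPHA_DEFAULT, SR_DEFAULT]
bounds = [(lo, hi) for _, lo, hi, _ in PARAMS]
result = minimize(objective, x0, method='Powell', bounds=bounds)
print(snap(result.x), result.fun)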

1 Answer


The way I have ended up solving this problem (with this specific sensor) is unsurprisingly fairly simple.

The sensor data can be imagined as an arbitrary reading X for each actual distance Y, so the relationship can be plotted as a graph. The Dofs setting can then be thought of as the Y offset at the measured point, and the (optimal) Alpha setting as the slope of a best-fit line through the possible points. For this initial sensor calibration, the other settings do not need to be modified.
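If the sensor applies these two settings as a simple linear map (reading ≈ Alpha·X + Dofs), the "best-fit line" framing can also be written as a least-squares fit; the numbers below are made up purely for illustration.

# Sketch: fit the slope (Alpha) and an offset in one step with a least-squares
# line fit, assuming the reading is a linear function of the raw value X.
import numpy as np

x = np.array([3.1, 2.3, 1.5])   # hypothetical raw readings X (m)
y = np.array([4.0, 3.0, 2.0])   # hypothetical actual distances Y (m)
alpha, offset = np.polyfit(x, y, 1)   # y ≈ alpha*x + offset  ->  alpha = 1.25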

In order to properly calibrate the sensor, three (decreasing) distances are selected and the following steps are taken (a small worked example with made-up numbers follows the list):

1. Place the target at the first distance, save the first X, and adjust Dofs until X = Y.

2. Move the target to the second and third distances, saving the X value at each.

3. Calculate ΔX by averaging the ΔX of A->B and of B->C, and do the same for ΔY.

4. Set Alpha to ΔY/ΔX.
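As a quick worked example of steps 2 to 4, with made-up numbers: suppose the three known distances are 4.0 m, 3.0 m and 2.0 m and the raw readings come out as 3.1 m, 2.3 m and 1.5 m.

# Hypothetical values, just to illustrate the averaging in steps 2-4
distances = [4.0, 3.0, 2.0]   # Y: known target distances (m)
readings  = [3.1, 2.3, 1.5]   # X: raw sensor readings (m)

deltaX = (abs(readings[0] - readings[1]) + abs(readings[1] - readings[2])) / 2      # 0.8
deltaY = (abs(distances[0] - distances[1]) + abs(distances[1] - distances[2])) / 2  # 1.0
newAlpha = deltaY / deltaX    # Alpha = 1.25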

Relevant Code:

import math
import numpy as np

# takeMeasurement, getOutput, getJump, adjust, adjustUp, adjustDown, and the
# settings / margin / target / distance1-3 values are defined elsewhere.

# Shift the y-offset (Dofs) up or down until the sensor reading equals the actual first distance (x = y)
input("Ensure target ({}) is at the first distance ({}m), then press [Enter] to continue...".format(target, distance1))
print("Measuring sensor reading...")
firstX = takeMeasurement()
optimalDofs = math.inf
print("Adjusting distance offset...")
while True:
    reading = takeMeasurement()
    print("Reading: {:.3f}m, Dofs: {}m, Target: {}m".format(reading, getOutput(settings['dofs'][0]), distance1))
    if (reading > distance1-margin) and (reading < distance1+margin):
        optimalDofs = getOutput(settings['dofs'][0])
        print("Optimal Distance Offset: {}m, Initial Sensor Reading: {}m".format(optimalDofs, firstX))
        break
    diff = abs(reading - distance1)
    jump = getJump(diff)
    if reading < distance1+margin:    # at this point the reading must be below the target band
        adjustDown(settings['dofs'][0], jump)
    elif reading > distance1-margin:  # ...or above it
        adjustUp(settings['dofs'][0], jump)

# Reset dofs to default
print("Resetting distance offset...")
adjust(settings['dofs'][0], DOFS_DEFAULT)

# Finds the sensor reading (x) at the second distance
input("Move target ({}) to the second distance ({}m), then press [Enter] to continue...".format(target, distance2))
print("Measuring sensor reading...")
secondX = takeMeasurement()
print("Sensor Reading: {}m".format(secondX))

# Finds the sensor reading (x) at the third distance
input("Move target ({}) to the third distance ({}m), then press [Enter] to continue...".format(target, distance3))
print("Measuring sensor reading...")
thirdX = takeMeasurement()
print("Sensor Reading: {}m".format(thirdX))

# Calculate the optimal alpha value by averaging delta X and delta Y
# Set dofs back to the optimal setting
print("Calculating optimal alpha...")
deltaX = (abs(firstX-secondX)+abs(secondX-thirdX))/2
print("Delta X: {}m".format(deltaX))
deltaY = (abs(distance1-distance2)+abs(distance2-distance3))/2
print("Delta Y: {}m".format(deltaY))
newAlpha = np.around(deltaY/deltaX, decimals=2)
print("Calculated optimal alpha: {:.2f}".format(newAlpha))
print("Setting alpha...")
adjust(settings['alpha'][0], newAlpha)
print("Readjusting distance offset...")
adjust(settings['dofs'][0], optimalDofs)
print("Optimal Alpha: {}, Optimal Distance Offset: {}".format(newAlpha, optimalDofs))
Ross Swartz