If more iterations should yield better results, then you could ignore the input `t` parameter and iterate until the result converges within the current precision:
import decimal

#NOTE: separate user input from the algorithm;
#      there is no input inside the `radius()` function
def radius(r1, r2, r3):
    with decimal.localcontext() as ctx:
        ctx.prec += 2  # increase precision for intermediate calculations
        prev = None    # previous value
        k1, k2, k3 = [1 / decimal.Decimal(r) for r in [r1, r2, r3]]
        while True:
            # change some parameters to simulate a converging algorithm
            #NOTE: you don't need to wrap everything in `Decimal()`;
            #      Decimal operands propagate through the arithmetic
            k1 = k1 + k2 + k3 + 2*(k1*k2 + k2*k3 + k3*k1).sqrt()
            r = 1 / k1
            if prev == r:  # compare using the enhanced precision
                break
            prev = r       # save the previous value
    return +r  # unary `+` rounds to the current (restored) precision

decimal.getcontext().prec = 50  # set desired precision
print(radius(*map(int, raw_input().split())))
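The unary `+` on the `return` line is what keeps the two extra guard digits invisible to the caller: applying `+` to a `Decimal` re-rounds it under whatever context is currently active, while merely printing a stored value does not. A minimal sketch of that behaviour (the values in the comments assume the default rounding mode):

import decimal

decimal.getcontext().prec = 5
d = decimal.Decimal(1) / decimal.Decimal(7)  # computed at prec=5 -> Decimal('0.14286')
decimal.getcontext().prec = 3                # lower the precision afterwards
print(d)   # 0.14286 -- an existing value keeps all of its digits
print(+d)  # 0.143   -- unary plus re-rounds to the current precision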
For a working example that demonstrates this technique, see Gauss-Legendre Algorithm in python, which calculates Pi to 100 digits.
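For reference, here is a sketch of that Gauss-Legendre iteration written in the same pattern (raise the working precision inside `localcontext()`, loop until the estimate stops changing, round on return); this is my own sketch of the well-known AGM iteration, not necessarily the exact code from the linked answer:

import decimal

def pi_gauss_legendre():
    """Approximate pi via the Gauss-Legendre (AGM) iteration."""
    with decimal.localcontext() as ctx:
        ctx.prec += 2  # guard digits for the intermediate arithmetic
        a, b = decimal.Decimal(1), 1 / decimal.Decimal(2).sqrt()
        t, p = decimal.Decimal(1) / 4, decimal.Decimal(1)
        prev = None
        while True:
            # all right-hand sides use the *old* a, b, t, p;
            # note that a_old - a_new == (a - b)/2
            a, b, t, p = (a + b) / 2, (a * b).sqrt(), t - p * ((a - b) / 2) ** 2, 2 * p
            pi = (a + b) ** 2 / (4 * t)
            if pi == prev:  # converged within the enhanced precision
                break
            prev = pi
    return +pi  # round to the caller's precision

decimal.getcontext().prec = 100
print(pi_gauss_legendre())  # 3.14159265358979323846...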