How do I implement this in Python?
Given is an input matrix like this one:
┌───┬───┬───┐
│ 2 │ 2 │ 5 │
├───┼───┼───┤
│ . │ . │ 0 │
├───┼───┼───┤
│ 3 │ . │ . │
└───┴───┴───┘
Instead of the dots, insert numbers so that every row and every column of the square sums to the same value.
My code:
def EnterMatr():
    """Read an n-by-n grid from stdin.

    The first line is the size n; each following line holds n tokens.
    A "." token (an unknown cell) becomes None, anything else becomes int.
    Returns the grid as a list of rows.
    """
    size = int(input("Enter n: "))
    print("Enter a matrix:")
    rows = []
    for _ in range(size):
        tokens = input().split()
        # Dots mark the cells we still have to fill in.
        rows.append([None if tok == "." else int(tok) for tok in tokens])
    return rows
def SimpleMatrix(number, row, colum, size):
    """Spread `number` over a size-by-size zero matrix, one copy per
    row and per column, anchored at position (row, colum).

    Example for size 3:
        for 2 at (0, 0) => [2 0 0]    for 2 at (0, 1) => [0 2 0]
                           [0 2 0]                       [2 0 0]
                           [0 0 2]                       [0 0 2]
    """
    layer = [[0] * size for _ in range(size)]
    layer[row][colum] = number
    for r in range(size):
        if r == row:
            continue  # the anchored row already holds its copy
        for c in range(size):
            # Place at most one copy per row, never in the anchor's
            # column and only where row r and column c are still clear.
            if c != colum and RowAndColumEmpty(layer, r, c):
                layer[r][c] = number
                break
    return layer
def GenerateMultiSemiMetrix(matr):
    """Expand every *given* cell of `matr` into its own SimpleMatrix layer.

    Bug fix: the original guard `if matr[i][j]:` is falsy both for the
    None placeholders (dots) and for a given 0, so an explicit 0 in the
    input was silently treated as an empty cell — which is why the 0 at
    (1, 2) in the example turned into a 1. Only None marks a dot, so
    test against None explicitly.

    Returns a list of size-by-size matrices, one per given number.
    """
    MultiMatrix = []
    size = len(matr)
    for i in range(size):
        for j in range(size):
            if matr[i][j] is not None:  # 0 is a given value, not a dot
                MultiMatrix.append(SimpleMatrix(matr[i][j], i, j, size))
    return MultiMatrix
def RowAndColumEmpty(matr, row, colum):
    """Return True when row `row` and column `colum` of the square
    matrix `matr` contain nothing but zeros, i.e. the spot (row, colum)
    is free for the next placement.
    """
    row_clear = all(value == 0 for value in matr[row])
    col_clear = all(matr[k][colum] == 0 for k in range(len(matr)))
    return row_clear and col_clear
def addMatr(first, second):
    """Return the element-wise sum of two equally-sized square matrices
    as a new matrix; neither input is modified.
    """
    return [
        [a + b for a, b in zip(row_a, row_b)]
        for row_a, row_b in zip(first, second)
    ]
def SumMatrix(MatrArr):
    """Fold a non-empty list of equally-sized square matrices into their
    element-wise sum, starting from an all-zero accumulator.
    """
    size = len(MatrArr[0])
    total = [[0] * size for _ in range(size)]
    for layer in MatrArr:
        total = addMatr(total, layer)
    return total
# Read the puzzle grid, expand each given number into its own
# one-copy-per-row/column layer, sum the layers, and print the result
# row by row.
matr = EnterMatr()
matr = SumMatrix(GenerateMultiSemiMetrix(matr))
for elem in matr:
    print(elem)
Here is the output of an execution of that code:
Enter n: 3
Enter a matrix:
2 2 5
. . 0
1 . .
[2, 3, 5]
[7, 2, 1]
[1, 5, 4]
The problem is that the result matrix has different numbers at places where already numbers were given in the input matrix:
- The original 2 at (0, 1) becomes 3
- The original 0 at (1, 2) becomes 1
This should not be possible.
How can I change the program such that it would only change the dots?
Is there any algorithm or something else that could solve my problem?