Diligent Key Presser's answer is the right one if runtime is unimportant. I work a lot with 3D arrays, and I have learned that element-by-element copy operations are incredibly expensive. Keep that in mind! Another thing: LINQ is slow, and the precondition checks eat up time as well.
In my opinion, if time is of importance, this solution might be useful:
using System;
using System.Linq;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;

namespace ArrayConverter {
    public class Benchmark {
        [Params(10, 100, 1000, 10000)]
        public int size;

        public double[][] data;

        [GlobalSetup]
        public void Setup() {
            var rnd = new Random();
            data = new double[size][];
            for (var i = 0; i < size; i++) {
                data[i] = new double[size];
                for (var j = 0; j < size; j++) {
                    data[i][j] = rnd.NextDouble();
                }
            }
        }

        [Benchmark]
        public void ComputeTo2D() {
            var output = To2D(data);
        }

        [Benchmark]
        public void ComputeTo2DFast() {
            var output = To2DFast(data);
        }
        public static T[,] To2DFast<T>(T[][] source) where T : unmanaged {
            // The second dimension is the common row length of the jagged input.
            var rowLength = source[0].Length;
            var dataOut = new T[source.Length, rowLength];
            unsafe {
                for (var i = 0; i < source.Length; i++) {
                    if (source[i].Length != rowLength) {
                        throw new InvalidOperationException("The given jagged array is not rectangular.");
                    }
                    fixed (T* pDataIn = source[i]) {
                        fixed (T* pDataOut = &dataOut[i, 0]) {
                            CopyBlockHelper.SmartCopy<T>(pDataOut, pDataIn, rowLength);
                        }
                    }
                }
            }
            return dataOut;
        }
        public static T[,] To2D<T>(T[][] source) {
            try {
                var FirstDim = source.Length;
                var SecondDim = source.GroupBy(row => row.Length).Single().Key; // throws InvalidOperationException if source is not rectangular
                var result = new T[FirstDim, SecondDim];
                for (var i = 0; i < FirstDim; ++i)
                    for (var j = 0; j < SecondDim; ++j)
                        result[i, j] = source[i][j];
                return result;
            }
            catch (InvalidOperationException) {
                throw new InvalidOperationException("The given jagged array is not rectangular.");
            }
        }
    }
    public class Program {
        public static void Main(string[] args) {
            BenchmarkRunner.Run<Benchmark>();

            // Quick correctness check (uncomment to verify both methods produce the same output):
            // var rnd = new Random();
            // var size = 100;
            // var data = new double[size][];
            // for (var i = 0; i < size; i++) {
            //     data[i] = new double[size];
            //     for (var j = 0; j < size; j++) {
            //         data[i][j] = rnd.NextDouble();
            //     }
            // }
            //
            // var outSafe = Benchmark.To2D(data);
            // var outFast = Benchmark.To2DFast(data);
            //
            // for (var i = 0; i < outSafe.GetLength(0); i++) {
            //     for (var j = 0; j < outSafe.GetLength(1); j++) {
            //         if (outSafe[i, j] != outFast[i, j]) {
            //             Console.WriteLine("Error at: {0}, {1}", i, j);
            //         }
            //     }
            // }
            //
            // Console.WriteLine("All Good!");
        }
    }
}
The CopyBlockHelper was obtained from here: https://gist.github.com/theraot/1bfd0deb4a1aab0a27d8
I just wrote a wrapper for the IL functions:
using System;
using System.Reflection.Emit;

namespace ArrayConverter {
    // Inspired by:
    // http://xoofx.com/blog/2010/10/23/high-performance-memcpy-gotchas-in-c/
    public class CopyBlockHelper {
        private const int BlockSize = 16384;

        private static readonly CopyBlockDelegate CpBlock = GenerateCopyBlock();

        private unsafe delegate void CopyBlockDelegate(void* des, void* src, uint bytes);

        private static unsafe void CopyBlock(void* dest, void* src, uint count) {
            var local = CpBlock;
            local(dest, src, count);
        }
        private static CopyBlockDelegate GenerateCopyBlock() {
            // Don't ask... this builds a dynamic method whose entire body is a single
            // cpblk instruction, i.e. an unmanaged block copy that the JIT turns into an optimized memcpy.
            var method = new DynamicMethod("CopyBlockIL", typeof(void),
                new[] { typeof(void*), typeof(void*), typeof(uint) }, typeof(CopyBlockHelper));
            var emitter = method.GetILGenerator();
            // emit IL: cpblk(dest, src, byteCount)
            emitter.Emit(OpCodes.Ldarg_0);
            emitter.Emit(OpCodes.Ldarg_1);
            emitter.Emit(OpCodes.Ldarg_2);
            emitter.Emit(OpCodes.Cpblk);
            emitter.Emit(OpCodes.Ret);
            // compile to delegate
            return (CopyBlockDelegate) method.CreateDelegate(typeof(CopyBlockDelegate));
        }

        public static unsafe void SmartCopy<T>(T* pointerDataOutCurrent, T* pointerDataIn, int length) where T : unmanaged {
            var sizeOfType = sizeof(T);
            var numberOfBytesInBlock = Convert.ToUInt32(sizeOfType * length);
            // Copy in chunks of BlockSize bytes; the remainder is handled after the loop.
            var numOfIterations = numberOfBytesInBlock / BlockSize;
            var overheadOfLastIteration = numberOfBytesInBlock % BlockSize;
            uint offset;
            for (var idx = 0u; idx < numOfIterations; idx++) {
                offset = idx * BlockSize;
                CopyBlock(pointerDataOutCurrent + offset / sizeOfType, pointerDataIn + offset / sizeOfType, BlockSize);
            }
            offset = numOfIterations * BlockSize;
            CopyBlock(pointerDataOutCurrent + offset / sizeOfType, pointerDataIn + offset / sizeOfType, overheadOfLastIteration);
        }
    }
}
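If your runtime has System.Runtime.CompilerServices.Unsafe available, you can skip the DynamicMethod entirely: Unsafe.CopyBlockUnaligned compiles down to the same cpblk instruction without any runtime IL generation. Here is a rough sketch (the class and method names UnsafeCopyExample and To2DUnsafeCopy are my own, and I have not benchmarked this variant against the helper above):

using System;
using System.Runtime.CompilerServices;

namespace ArrayConverter {
    public static class UnsafeCopyExample {
        // Hypothetical alternative to To2DFast that uses Unsafe.CopyBlockUnaligned
        // instead of the hand-emitted cpblk delegate.
        public static unsafe T[,] To2DUnsafeCopy<T>(T[][] source) where T : unmanaged {
            var rowLength = source[0].Length;
            var dataOut = new T[source.Length, rowLength];
            for (var i = 0; i < source.Length; i++) {
                if (source[i].Length != rowLength) {
                    throw new InvalidOperationException("The given jagged array is not rectangular.");
                }
                fixed (T* pDataIn = source[i])
                fixed (T* pDataOut = &dataOut[i, 0]) {
                    // Copies one row (rowLength * sizeof(T) bytes) in a single block copy.
                    Unsafe.CopyBlockUnaligned(pDataOut, pDataIn, (uint)(rowLength * sizeof(T)));
                }
            }
            return dataOut;
        }
    }
}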
Running ComputeTo2D and ComputeTo2DFast with BenchmarkDotNet gives the following results:
Method | size | Mean | Error | StdDev |
---------------- |------ |-----------------:|-----------------:|-----------------:|
ComputeTo2D | 10 | 972.2 ns | 18.981 ns | 17.755 ns |
ComputeTo2DFast | 10 | 233.1 ns | 6.672 ns | 6.852 ns |
ComputeTo2D | 100 | 21,082.5 ns | 278.679 ns | 247.042 ns |
ComputeTo2DFast | 100 | 6,100.2 ns | 66.566 ns | 62.266 ns |
ComputeTo2D | 1000 | 2,481,061.0 ns | 13,724.850 ns | 12,166.721 ns |
ComputeTo2DFast | 1000 | 1,939,575.1 ns | 18,519.845 ns | 16,417.358 ns |
ComputeTo2D | 10000 | 340,687,083.2 ns | 1,671,837.229 ns | 1,563,837.429 ns |
ComputeTo2DFast | 10000 | 279,996,210.4 ns | 955,032.923 ns | 745,626.822 ns |
If possible, use Array.Copy, Buffer.BlockCopy or an IL cpblk to increase conversion performance; value-by-value copying is slow and therefore not the best option. Further speedup can be found by optimizing a few things and removing the if-clause inside the copy loop; a factor of at least 2x should be achievable. For primitive element types, a safe Buffer.BlockCopy variant is sketched below.
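Here is a minimal sketch of that idea for double data; the class and method names (BlockCopyExample, To2DBlockCopy) are made up for illustration. Buffer.BlockCopy operates on the raw bytes of primitive-typed arrays, including multi-dimensional ones, so no unsafe code is needed:

using System;

namespace ArrayConverter {
    public static class BlockCopyExample {
        // Sketch: convert a jagged double[][] into a double[,] with Buffer.BlockCopy.
        // Only valid for primitive element types, since Buffer.BlockCopy copies raw bytes.
        public static double[,] To2DBlockCopy(double[][] source) {
            var rows = source.Length;
            var cols = source[0].Length;
            var result = new double[rows, cols];
            var rowBytes = cols * sizeof(double);
            for (var i = 0; i < rows; i++) {
                if (source[i].Length != cols) {
                    throw new InvalidOperationException("The given jagged array is not rectangular.");
                }
                // Offsets are in bytes; a 2D array is stored row-major, so row i starts at i * rowBytes.
                Buffer.BlockCopy(source[i], 0, result, i * rowBytes, rowBytes);
            }
            return result;
        }
    }
}

As always, benchmark it against To2DFast on your own data and hardware before settling on one approach.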