I am working on homework assignment 1 for the Performance Engineering of Software Systems course on MIT OCW. The assignment involves debugging some C code designed to multiply matrices. Matrices are created using the following function:
matrix* make_matrix(int rows, int cols) {
matrix* new_matrix = malloc(sizeof(matrix));
// Set the number of rows and columns
new_matrix->rows = rows;
new_matrix->cols = cols;
// Allocate a buffer big enough to hold the matrix.
new_matrix->values = (int**)malloc(sizeof(int*) * rows);
for (int i = 0; i < rows; i++) {
new_matrix->values[i] = (int*)malloc(sizeof(int) * cols);
}
return new_matrix;
}
They are multiplied using the following code:
int matrix_multiply_run(const matrix* A, const matrix* B, matrix* C) {
//tbassert(A->cols == B->rows,
//"A->cols = %d, B->rows = %dn", A->cols, B->rows);
//tbassert(A->rows == C->rows,
//"A->rows = %d, C->rows = %dn", A->rows, C->rows);
//tbassert(B->cols == C->cols,
//"B->cols = %d, C->cols = %dn", B->cols, C->cols);
for (int i = 0; i < A->rows; i++) {
for (int j = 0; j < B->cols; j++) {
for (int k = 0; k < A->cols; k++) {
C->values[i][j] += A->values[i][k] * B->values[k][j];
}
}
}
return 0;
}
The test file creates matrices, seeds A & B with random values, and then runs them through the multiplier using the following code:
unsigned int randomSeed = 1;
matrix* A;
matrix* B;
matrix* C;
const int kMatrixSize = 4;
A = make_matrix(kMatrixSize, kMatrixSize+1);
B = make_matrix(kMatrixSize, kMatrixSize);
C = make_matrix(kMatrixSize, kMatrixSize);
if (use_zero_matrix) {
for (int i = 0; i < A->rows; i++) {
for (int j = 0; j < A->cols; j++) {
A->values[i][j] = 0;
}
}
for (int i = 0; i < B->rows; i++) {
for (int j = 0; j < B->cols; j++) {
B->values[i][j] = 0;
}
}
} else {
for (int i = 0; i < A->rows; i++) {
for (int j = 0; j < A->cols; j++) {
A->values[i][j] = rand_r(&randomSeed) % 10;
}
}
for (int i = 0; i < B->rows; i++) {
for (int j = 0; j < B->cols; j++) {
B->values[i][j] = rand_r(&randomSeed) % 10;
}
}
}
matrix_multiply_run(A, B, C);
This should produce a segmentation fault that can be debugged, because A has 1 more column than B has rows. However, when I run it on my M3 Mac, it does not produce a segmentation fault and instead I get the following:
Matrix A:
------------
7 9 3 8 0
2 4 8 3 9
0 5 2 2 7
3 7 9 0 2
------------
Matrix B:
------------
3 9 9 7
0 3 9 8
6 5 7 6
2 7 0 3
Result:
------------
55 161 165 163
555 1540 1595 1570
401 1166 1214 1199
173 415 483 457
Why would this code not produce a segmentation fault when trying to access the nonexistent 5th row of B?
5