Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
104 changes: 57 additions & 47 deletions experiments/MnistExperiment.c
Original file line number Diff line number Diff line change
Expand Up @@ -176,12 +176,60 @@ static void epochCallback(size_t epoch, float trainLoss, float evalLoss) {
writeCsvRow(LOG, epoch, 0, trainLoss, evalLoss);
}

/**
 * Writes the CSV column header to filePath, truncating any existing
 * contents (mode "w"). The header names the columns that epochCallback
 * later appends via writeCsvRow.
 */
static void writeCsvHeader(char *filePath) {
    char *headerLine = "epoch, batch, train_loss, eval_loss\n";
    char *rows[] = {headerLine};
    size_t columnsPerRow[] = {4};

    csvData_t data;
    setCSVData(&data, rows, 1, columnsPerRow);
    csvWriteRowsByBufferSize(filePath, &data, "w");
}

#define MODEL_SIZE 4

/**
 * buildModel — fills model[0..3] with a 4-layer MLP for MNIST:
 *   [0] Linear 784->20 (Xavier-uniform weights, zero bias)
 *   [1] ReLU
 *   [2] Linear 20->10 (Xavier-uniform weights, zero bias)
 *   [3] Softmax
 * The caller must supply an array with at least MODEL_SIZE (4) slots.
 *
 * NOTE(review): the weight/bias buffers are function-local `static` so their
 * storage outlives this call, but the dims arrays are stack-local — this
 * assumes tensorInitWithDistribution copies the dimension values rather than
 * retaining the pointer; TODO confirm against TensorApi.
 */
static void buildModel(layer_t **model) {
// Single shared float32 (non-quantized) quantization descriptor for all layers.
quantization_t *q = quantizationInitFloat();

// Linear 784→20
static float weight0Data[20 * 28 * 28] = {0};
size_t weight0Dims[] = {20, 28 * 28};
// Trailing args (28*28, 20) are fan-in/fan-out used by Xavier initialization.
tensor_t *weight0Param = tensorInitWithDistribution(XAVIER_UNIFORM, weight0Data, weight0Dims, 2, q, NULL, 28*28, 20);
tensor_t *weight0Grad = gradInitFloat(weight0Param, NULL);
parameter_t *weight0 = parameterInit(weight0Param, weight0Grad);

static float bias0Data[20] = {0};
size_t bias0Dims[] = {1, 20};
tensor_t *bias0Param = tensorInitWithDistribution(ZEROS, bias0Data, bias0Dims, 2, q, NULL, 1, 20);
tensor_t *bias0Grad = gradInitFloat(bias0Param, NULL);
parameter_t *bias0 = parameterInit(bias0Param, bias0Grad);

model[0] = linearLayerInit(weight0, bias0, q, q, q, q);

// ReLU
model[1] = reluLayerInit(q, q);

// Linear 20→10
static float weight1Data[10 * 20] = {0};
size_t weight1Dims[] = {10, 20};
tensor_t *weight1Param = tensorInitWithDistribution(XAVIER_UNIFORM, weight1Data, weight1Dims, 2, q, NULL, 20, 10);
tensor_t *weight1Grad = gradInitFloat(weight1Param, NULL);
parameter_t *weight1 = parameterInit(weight1Param, weight1Grad);

static float bias1Data[10] = {0};
size_t bias1Dims[] = {1, 10};
tensor_t *bias1Param = tensorInitWithDistribution(ZEROS, bias1Data, bias1Dims, 2, q, NULL, 1, 10);
tensor_t *bias1Grad = gradInitFloat(bias1Param, NULL);
parameter_t *bias1 = parameterInit(bias1Param, bias1Grad);

model[2] = linearLayerInit(weight1, bias1, q, q, q, q);

// Softmax
model[3] = softmaxLayerInit(q, q);
}


int main(void) {
// this clears the old file
// also creates file if non-existent
FILE *fp = fopen(LOG, "w");
fclose(fp);
writeCsvHeader(LOG);

size_t numberOfEpochs = 10;
initDataSets();
Expand All @@ -205,52 +253,14 @@ int main(void) {
0,
true);

quantization_t *q = quantizationInitFloat();

float weight0Data[20 * 28 * 28] = {0};
size_t weight0Dims[] = {20, 28 * 28};
size_t weight0NumberOfDims = 2;
tensor_t *weight0Param = tensorInitWithDistribution(XAVIER_UNIFORM, weight0Data, weight0Dims, weight0NumberOfDims, q, NULL, 28*28, 20);
tensor_t *weight0Grad = gradInitFloat(weight0Param, NULL);
parameter_t *weight0 = parameterInit(weight0Param, weight0Grad);

float bias0Data[20] = {0};
size_t bias0Dims[] = {1, 20};
size_t bias0NumberOfDims = 2;
tensor_t *bias0Param = tensorInitWithDistribution(ZEROS, bias0Data, bias0Dims, bias0NumberOfDims, q, NULL, 1, 20);
tensor_t *bias0Grad = gradInitFloat(bias0Param, NULL);
parameter_t *bias0 = parameterInit(bias0Param, bias0Grad);

layer_t *linear0 = linearLayerInit(weight0, bias0, q, q, q, q);

layer_t *relu = reluLayerInit(q, q);

float weight1Data[10 * 20] = {0};
size_t weight1Dims[] = {10, 20};
size_t weight1NumberOfDims = 2;
tensor_t *weight1Param = tensorInitWithDistribution(XAVIER_UNIFORM, weight1Data, weight1Dims, weight1NumberOfDims, q, NULL, 20, 10);
tensor_t *weight1Grad = gradInitFloat(weight1Param, NULL);
parameter_t *weight1 = parameterInit(weight1Param, weight1Grad);

float bias1Data[10] = {0};
size_t bias1Dims[] = {1, 10};
size_t bias1NumberOfDims = 2;
tensor_t *bias1Param = tensorInitWithDistribution(ZEROS, bias1Data, bias1Dims, bias1NumberOfDims, q, NULL, 1, 10);
tensor_t *bias1Grad = gradInitFloat(bias1Param, NULL);
parameter_t *bias1 = parameterInit(bias1Param, bias1Grad);

layer_t *linear1 = linearLayerInit(weight1, bias1, q, q, q, q);

layer_t *softmax = softmaxLayerInit(q, q);

layer_t *model[] = {linear0, relu, linear1, softmax};
size_t sizeModel = 4;
layer_t *model[MODEL_SIZE];
buildModel(model);

optimizer_t *sgd = sgdMCreateOptim(0.001f, 0.f, 0.f, model, sizeModel, FLOAT32);
optimizer_t *sgd = sgdMCreateOptim(0.001f, 0.f, 0.f, model, MODEL_SIZE, FLOAT32);

clock_t start = clock();

trainingRunResult_t result = trainingRun(model, sizeModel, CROSS_ENTROPY,
trainingRunResult_t result = trainingRun(model, MODEL_SIZE, CROSS_ENTROPY,
trainDataloader, testDataloader, sgd,
numberOfEpochs, calculateGradsSequential,
inferenceWithLoss, epochCallback);
Expand All @@ -261,7 +271,7 @@ int main(void) {
PRINT_INFO("Training finished in %f seconds\n", duration_sec);
PRINT_INFO("Final train loss: %f, eval loss: %f\n", result.finalTrainLoss, result.finalEvalLoss);

float accuracy = evaluationEpochAccuracy(model, sizeModel, testDataloader, 10, inference);
float accuracy = evaluationEpochAccuracy(model, MODEL_SIZE, testDataloader, 10, inference);

PRINT_INFO("Integration test accuracy: %.2f%%\n", accuracy * 100.0f);
}
7 changes: 6 additions & 1 deletion src/arithmetic/Arithmetic.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,11 @@ bool doDimensionsMatch(tensor_t *a, tensor_t *b) {
size_t aNumberOfDims = a->shape->numberOfDimensions;
size_t bNumberOfDims = b->shape->numberOfDimensions;

if (aNumberOfDims != bNumberOfDims) {
PRINT_ERROR("Rank mismatch: %lu vs %lu\n", aNumberOfDims, bNumberOfDims);
exit(1);
}

size_t aOrderedDims[aNumberOfDims];
size_t bOrderedDims[bNumberOfDims];

Expand All @@ -43,7 +48,7 @@ bool doDimensionsMatch(tensor_t *a, tensor_t *b) {
}
}
return true;
};
}

size_t calcTensorIndexByIndices(size_t numberOfDimensions, size_t *dimensions, size_t *indices) {
size_t index = indices[numberOfDimensions - 1];
Expand Down
4 changes: 2 additions & 2 deletions src/userApi/tensor/TensorApi.c
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,9 @@ tensor_t *tensorInitWithDistribution(distributionType_t distributionType, float
size_t numberOfDims, quantization_t *quantization,
sparsity_t *sparsity, size_t inputFeatures,
size_t outputFeatures) {
size_t numberOfValues = 0;
size_t numberOfValues = 1;
for (size_t i = 0; i < numberOfDims; i++) {
numberOfValues += dims[i];
numberOfValues *= dims[i];
}

switch (distributionType) {
Expand Down
34 changes: 34 additions & 0 deletions test/unit/arithmetic/UnitTestArithmetic.c
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,37 @@ void testFloat32ElementWithTensorArithmetic() {

}

// doDimensionsMatch: two tensors whose shapes are both 2x3 must match.
void testDoDimensionsMatch_SameShape_ReturnsTrue() {
    size_t leftDims[] = {2, 3};
    size_t leftOrder[] = {0, 1};
    shape_t leftShape = {.dimensions = leftDims, .orderOfDimensions = leftOrder, .numberOfDimensions = 2};
    tensor_t left = {.shape = &leftShape};

    size_t rightDims[] = {2, 3};
    size_t rightOrder[] = {0, 1};
    shape_t rightShape = {.dimensions = rightDims, .orderOfDimensions = rightOrder, .numberOfDimensions = 2};
    tensor_t right = {.shape = &rightShape};

    TEST_ASSERT_TRUE(doDimensionsMatch(&left, &right));
}

// doDimensionsMatch: equal rank but one differing dimension (3 vs 4) must not match.
void testDoDimensionsMatch_DifferentDims_ReturnsFalse() {
    size_t leftDims[] = {2, 3};
    size_t leftOrder[] = {0, 1};
    shape_t leftShape = {.dimensions = leftDims, .orderOfDimensions = leftOrder, .numberOfDimensions = 2};
    tensor_t left = {.shape = &leftShape};

    size_t rightDims[] = {2, 4};
    size_t rightOrder[] = {0, 1};
    shape_t rightShape = {.dimensions = rightDims, .orderOfDimensions = rightOrder, .numberOfDimensions = 2};
    tensor_t right = {.shape = &rightShape};

    TEST_ASSERT_FALSE(doDimensionsMatch(&left, &right));
}

// NOTE: doDimensionsMatch now calls exit(1) on rank mismatch — cannot test with Unity.
// The fix is verified by: different-rank inputs no longer silently read out of bounds.

// Unity fixture hooks — no per-test state to initialize or release here.
void setUp() {}
void tearDown() {}

Expand All @@ -178,6 +209,9 @@ int main(void) {
RUN_TEST(testCalcIndexByRawIndex);
RUN_TEST(testInt32PointWiseArithmetic);
RUN_TEST(testFloat32ElementWithTensorArithmetic);
RUN_TEST(testDoDimensionsMatch_SameShape_ReturnsTrue);
RUN_TEST(testDoDimensionsMatch_DifferentDims_ReturnsFalse);
// testDoDimensionsMatch_DifferentRank — now exit(1)s, verified by code review

return UNITY_END();
}
9 changes: 9 additions & 0 deletions test/unit/tensor/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,4 +20,13 @@ add_elastic_ai_unit_test(
Tensor
Rounding
Quantization
)
add_elastic_ai_unit_test(
LIB_UNDER_TEST
TensorApi
MORE_LIBS
Tensor
Rounding
Quantization
StorageApi
)
99 changes: 99 additions & 0 deletions test/unit/tensor/UnitTestTensorApi.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
#define SOURCE_FILE "UNIT_TEST_TENSOR_API"

#include <stddef.h>
#include <string.h>

#include "TensorApi.h"
#include "Tensor.h"
#include "Quantization.h"
#include "unity.h"

// Unity fixture hooks — no per-test state to initialize or release here.
void setUp() {}
void tearDown() {}

// Regression test for the element-count computation in
// tensorInitWithDistribution: dims {2,5} has product 10 but dim-sum 7.
// The old `+=` bug sized the tensor at 7 elements, so sentinel-seeded
// slots 7..9 would survive. ZEROS must overwrite all 10 values.
void testTensorInitWithDistribution_Zeros_InitializesProductOfDimsValues() {
    float buffer[10];
    for (size_t i = 0; i < 10; i++) {
        buffer[i] = 42.0f;
    }
    size_t dims[] = {2, 5};
    quantization_t q;
    initFloat32Quantization(&q);

    tensor_t *t = tensorInitWithDistribution(ZEROS, buffer, dims, 2, &q, NULL, 2, 5);

    float *out = (float *)t->data;
    for (size_t i = 0; i < 10; i++) {
        TEST_ASSERT_FLOAT_WITHIN(1e-9f, 0.0f, out[i]);
    }
}

// dims {3,4}: product 12 vs dim-sum 7 — ONES must set all 12 entries
// of the zero-initialized buffer to 1.0f.
void testTensorInitWithDistribution_Ones_InitializesAllValues() {
    float buffer[12] = {0};
    size_t dims[] = {3, 4};
    quantization_t q;
    initFloat32Quantization(&q);

    tensor_t *t = tensorInitWithDistribution(ONES, buffer, dims, 2, &q, NULL, 3, 4);

    float *out = (float *)t->data;
    for (size_t i = 0; i < 12; i++) {
        TEST_ASSERT_FLOAT_WITHIN(1e-9f, 1.0f, out[i]);
    }
}

// dims {4,5}: product 20 vs dim-sum 9. If only 9 values were initialized,
// 11 entries would keep the sentinel. NORMAL draws must overwrite every
// slot, so no sentinel value may survive.
void testTensorInitWithDistribution_Normal_InitializesAllValues() {
    const float sentinel = -999.0f;
    float buffer[20];
    for (size_t i = 0; i < 20; i++) {
        buffer[i] = sentinel;
    }
    size_t dims[] = {4, 5};
    quantization_t q;
    initFloat32Quantization(&q);

    tensor_t *t = tensorInitWithDistribution(NORMAL, buffer, dims, 2, &q, NULL, 4, 5);

    float *out = (float *)t->data;
    size_t survivors = 0;
    for (size_t i = 0; i < 20; i++) {
        if (out[i] == sentinel) {
            survivors++;
        }
    }
    // Every one of the 20 values must have been overwritten.
    TEST_ASSERT_EQUAL_UINT(0, survivors);
}

// Sanity check: the resulting tensor reports rank 2 and 2*3 = 6 elements.
void testTensorInitWithDistribution_ShapeIsCorrect() {
    float buffer[6] = {0};
    size_t dims[] = {2, 3};
    quantization_t q;
    initFloat32Quantization(&q);

    tensor_t *t = tensorInitWithDistribution(ZEROS, buffer, dims, 2, &q, NULL, 2, 3);

    TEST_ASSERT_EQUAL_UINT(2, t->shape->numberOfDimensions);
    TEST_ASSERT_EQUAL_UINT(6, calcNumberOfElementsByTensor(t));
}

// Test runner: registers the four tensorInitWithDistribution regression
// tests that pin the product-of-dims element-count fix.
int main(void) {
UNITY_BEGIN();
RUN_TEST(testTensorInitWithDistribution_Zeros_InitializesProductOfDimsValues);
RUN_TEST(testTensorInitWithDistribution_Ones_InitializesAllValues);
RUN_TEST(testTensorInitWithDistribution_Normal_InitializesAllValues);
RUN_TEST(testTensorInitWithDistribution_ShapeIsCorrect);
return UNITY_END();
}
26 changes: 24 additions & 2 deletions test/unit/userAPI/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,18 +16,40 @@ add_elastic_ai_unit_test(
LIB_UNDER_TEST
TrainingLoopApi
MORE_LIBS
CalculateGradsSequential
TrainingEpochDefault
TrainingBatchDefault
CalculateGradsSequential
CommonLayerLibs
TensorApi
LinearApi
ReluApi
SgdApi
QuantizationApi
LossFunction
InferenceApi
DataLoader
DataLoaderApi
StorageApi
)

add_executable(UnitTestMultiLayerTraining UnitTestMultiLayerTraining.c)
target_link_libraries(UnitTestMultiLayerTraining PRIVATE
unity
TrainingLoopApi
TrainingEpochDefault
TrainingBatchDefault
CalculateGradsSequential
CommonLayerLibs
TensorApi
LinearApi
ReluApi
SoftmaxApi
SgdApi
QuantizationApi
LossFunction
InferenceApi
DataLoader
DataLoaderApi
StorageApi
)
)
__register_target_as_unit_test(UnitTestMultiLayerTraining)
Loading
Loading