Enthernet Code
Enthernet Code
DIIDevHeads IoT Integration Server
Created by Enthernet Code on 9/11/2024 in #firmware-and-baremetal
Tensor Allocation Issue in TinyML Deployment on Arduino Nano 33 BLE Sense
I'm working on a TinyML project using an Arduino Nano 33 BLE Sense microcontroller with a built-in 9-axis inertial measurement unit (IMU) sensor (LSM9DS1). My goal is to deploy a gesture recognition model using TensorFlow Lite for Microcontrollers. I have followed the steps to collect data, train the model, and deploy it onto the microcontroller, but I am encountering an error during inference. Here's my code:
#include <Arduino.h>
#include <Arduino_LSM9DS1.h>  // IMU driver for the Nano 33 BLE Sense (original line was truncated)
#include <TensorFlowLite.h>
#include <Wire.h>

#include <tensorflow/lite/micro/all_ops_resolver.h>
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/schema/schema_generated.h>

#include "model.h"

// Scratch memory the TFLM interpreter carves input/output/intermediate
// tensors out of. TFLM expects this buffer to be 16-byte aligned; without
// `alignas`, AllocateTensors() can fail or waste arena space. 10 KB is a
// starting point — increase it if AllocateTensors() reports an error.
constexpr int kTensorArenaSize = 10 * 1024;
alignas(16) uint8_t tensor_arena[kTensorArenaSize];

// Model handle is bound at static-init time from the flatbuffer in model.h;
// interpreter/input/output are wired up in setup() after allocation succeeds.
const tflite::Model* model = tflite::GetModel(g_model);
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;   // interpreter->input(0) once tensors are allocated
TfLiteTensor* output = nullptr;  // interpreter->output(0) once tensors are allocated

void setup() {
Serial.begin(115200);
while (!Serial);


if (!IMU.begin()) {
Serial.println("Failed to initialize IMU!");
while (1);
}

static_interpreter(
model, resolver, tensor_arena, kTensorArenaSize);
interpreter = &static_interpreter;

if (interpreter->AllocateTensors() != kTfLiteOk) {
Serial.println("Error allocating tensors!");
return;
}

input = interpreter->input(0);
output = interpreter->output(0);
}

// Main loop: read one accelerometer sample, run inference, print the result.
void loop() {
  // setup() can return early (version mismatch, allocation failure) leaving
  // these pointers null, but loop() keeps being called — guard before use
  // instead of crashing on a null dereference.
  if (interpreter == nullptr || input == nullptr || output == nullptr) {
    delay(100);
    return;
  }

  float x, y, z;

  if (IMU.accelerationAvailable()) {
    IMU.readAcceleration(x, y, z);

    // Feed a single [x, y, z] sample into the input tensor.
    // NOTE(review): assumes the model's input tensor holds at least 3 floats —
    // confirm against the trained model's input shape.
    input->data.f[0] = x;
    input->data.f[1] = y;
    input->data.f[2] = z;

    if (interpreter->Invoke() != kTfLiteOk) {
      Serial.println("Error during inference!");
      return;
    }

    // NOTE(review): reads only output[0]; a multi-class gesture model usually
    // needs an argmax over all output elements — verify the output shape.
    float gesture = output->data.f[0];
    Serial.print("Detected gesture: ");
    Serial.println(gesture);
  }

  delay(100);  // ~10 Hz sampling
}
#include <Arduino.h>
#include <TensorFlowLite.h>
#include "model.h"
#include <Wire.h>
#include <Arduino_L

// Scratch memory the TFLM interpreter carves input/output/intermediate
// tensors out of. TFLM expects this buffer to be 16-byte aligned; without
// `alignas`, AllocateTensors() can fail or waste arena space. 10 KB is a
// starting point — increase it if AllocateTensors() reports an error.
constexpr int kTensorArenaSize = 10 * 1024;
alignas(16) uint8_t tensor_arena[kTensorArenaSize];

// Model handle is bound at static-init time from the flatbuffer in model.h;
// interpreter/input/output are wired up in setup() after allocation succeeds.
const tflite::Model* model = tflite::GetModel(g_model);
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;   // interpreter->input(0) once tensors are allocated
TfLiteTensor* output = nullptr;  // interpreter->output(0) once tensors are allocated

void setup() {
Serial.begin(115200);
while (!Serial);


if (!IMU.begin()) {
Serial.println("Failed to initialize IMU!");
while (1);
}

static_interpreter(
model, resolver, tensor_arena, kTensorArenaSize);
interpreter = &static_interpreter;

if (interpreter->AllocateTensors() != kTfLiteOk) {
Serial.println("Error allocating tensors!");
return;
}

input = interpreter->input(0);
output = interpreter->output(0);
}

// Main loop: read one accelerometer sample, run inference, print the result.
void loop() {
  // setup() can return early (version mismatch, allocation failure) leaving
  // these pointers null, but loop() keeps being called — guard before use
  // instead of crashing on a null dereference.
  if (interpreter == nullptr || input == nullptr || output == nullptr) {
    delay(100);
    return;
  }

  float x, y, z;

  if (IMU.accelerationAvailable()) {
    IMU.readAcceleration(x, y, z);

    // Feed a single [x, y, z] sample into the input tensor.
    // NOTE(review): assumes the model's input tensor holds at least 3 floats —
    // confirm against the trained model's input shape.
    input->data.f[0] = x;
    input->data.f[1] = y;
    input->data.f[2] = z;

    if (interpreter->Invoke() != kTfLiteOk) {
      Serial.println("Error during inference!");
      return;
    }

    // NOTE(review): reads only output[0]; a multi-class gesture model usually
    // needs an argmax over all output elements — verify the output shape.
    float gesture = output->data.f[0];
    Serial.print("Detected gesture: ");
    Serial.println(gesture);
  }

  delay(100);  // ~10 Hz sampling
}
9 replies