MetalDetector
See this on the Arduino blog!
Instead of sensing the presence of metal, this tinyML device detects rock (music)
Let’s make this machine learn…
After a semester learning how to create machine learning models for a microcontroller, specifically an Arduino Nano 33 BLE Sense, I present to you the MetalDetector. Now you can see how metal your music is as you play it.
I took about two hours of music and divided it into two datasets, “metal” and “non_metal”. These two datasets were loaded into Edge Impulse to start the machine learning process.
The Edge Impulse project can be found here:
https://studio.edgeimpulse.com/public/65438/latest
After running the Impulse, a machine learning model can be exported. Since I am using the Arduino Nano 33 BLE Sense, I chose to build and download an Arduino library, which includes all the component parts and examples needed to start using the model on the BLE Sense.
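To get a feel for what the exported library gives you before any microphone code is involved, here is a minimal sketch of its simplest path: fill a feature buffer and hand it to run_classifier(). This is only an illustration of the shape of the API, it assumes the include name from my exported library, and the features array is a placeholder rather than real data (in practice you would paste the raw features of a sample from the Edge Impulse studio).

#include <skip_non_metal_7_inferencing.h>

// Placeholder feature buffer; real values would come from a raw sample
// copied out of the Edge Impulse studio.
static const float features[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE] = { 0 };

// Callback the classifier uses to pull slices of the feature buffer
static int raw_feature_get_data(size_t offset, size_t length, float *out_ptr) {
    memcpy(out_ptr, features + offset, length * sizeof(float));
    return 0;
}

void setup() {
    Serial.begin(115200);
}

void loop() {
    signal_t signal;
    signal.total_length = EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE;
    signal.get_data = &raw_feature_get_data;

    ei_impulse_result_t result = { 0 };
    if (run_classifier(&signal, &result, false) != EI_IMPULSE_OK) {
        Serial.println("ERR: classifier failed");
        return;
    }

    // Print the confidence for each class ("metal" / "non_metal")
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        Serial.print(result.classification[ix].label);
        Serial.print(": ");
        Serial.println(result.classification[ix].value, 5);
    }
    delay(1000);
}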
A number of components are needed to run the model on the BLE Sense; the code I used to get the MetalDetector running is at the bottom of this post.
To build the physical object, I used a few different colors of acrylic, a protoboard, and a 180-degree servo.
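Behind the dial, the needle is driven by one simple idea: the classifier's confidence for the first class is scaled to a servo angle. Stripped of the audio plumbing, the mapping used in the full listing below looks roughly like this (updateNeedle is just a name I'm using here to isolate the logic; the real sketch inlines these lines after each classification):

#include <Servo.h>

Servo servoMotor;
const int servoPin = 3;   // control pin, same as in the full sketch

void setup() {
    servoMotor.attach(servoPin);
}

// confidence is result.classification[0].value from the classifier (0.0-1.0).
// The full sketch maps a 100-0 percentage onto a 0-110 degree sweep, so the
// needle swings across the dial as the score changes.
void updateNeedle(float confidence) {
    float percent = confidence * 100.0f;
    long angle = map((long)percent, 100, 0, 0, 110);
    servoMotor.write((int)angle);
    delay(100);   // give the servo time to settle
}

void loop() {
    // placeholder input; in the real sketch this comes from the model
    updateNeedle(0.75f);
}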
/* Edge Impulse Arduino examples
 * Copyright (c) 2021 EdgeImpulse Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// If your target is limited in memory remove this macro to save 10K RAM
#define EIDSP_QUANTIZE_FILTERBANK 0

/**
 * Define the number of slices per model window. E.g. a model window of 1000 ms
 * with slices per model window set to 4 results in a slice size of 250 ms.
 * For more info: https://docs.edgeimpulse.com/docs/continuous-audio-sampling
 */
#define EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW 3

/* Includes ---------------------------------------------------------------- */
#include <PDM.h>
#include <skip_non_metal_7_inferencing.h>

#include "Servo.h"    // include the servo library
Servo servoMotor;     // creates an instance of the servo object to control a servo
int servoPin = 3;     // control pin for servo motor

/** Audio buffers, pointers and selectors */
typedef struct {
    signed short *buffers[2];
    unsigned char buf_select;
    unsigned char buf_ready;
    unsigned int buf_count;
    unsigned int n_samples;
} inference_t;

static inference_t inference;
static bool record_ready = false;
static signed short *sampleBuffer;
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal
static int print_results = -(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW);

/**
 * @brief      Arduino setup function
 */
void setup()
{
    Serial.begin(115200);
    Serial.println("Edge Impulse Inferencing Demo");

    servoMotor.attach(servoPin); // pin 3 to the servo object

    // summary of inferencing settings (from model_metadata.h)
    ei_printf("Inferencing settings:\n");
    ei_printf("\tInterval: %.2f ms.\n", (float)EI_CLASSIFIER_INTERVAL_MS);
    ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
    ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
    ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) /
                                            sizeof(ei_classifier_inferencing_categories[0]));

    run_classifier_init();
    if (microphone_inference_start(EI_CLASSIFIER_SLICE_SIZE) == false) {
        ei_printf("ERR: Failed to setup audio sampling\r\n");
        return;
    }
}

/**
 * @brief      Arduino main function. Runs the inferencing loop.
 */
void loop()
{
    bool m = microphone_inference_record();
    if (!m) {
        ei_printf("ERR: Failed to record audio...\n");
        return;
    }

    signal_t signal;
    signal.total_length = EI_CLASSIFIER_SLICE_SIZE;
    signal.get_data = &microphone_audio_signal_get_data;
    ei_impulse_result_t result = {0};

    EI_IMPULSE_ERROR r = run_classifier_continuous(&signal, &result, debug_nn);
    if (r != EI_IMPULSE_OK) {
        ei_printf("ERR: Failed to run classifier (%d)\n", r);
        return;
    }

    if (++print_results >= (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW)) {
        // print the predictions
        float inferenceValue = result.classification[0].value * 100; // take value from prediction
        Serial.println(inferenceValue);                              // print prediction value

        float servoAngle = map(inferenceValue, 100, 0, 0, 110);
        servoMotor.write(servoAngle); // servo movement
        delay(100);

        ei_printf("Predictions ");
        ei_printf("(DSP: %d ms., Classification: %d ms., Anomaly: %d ms.)",
                  result.timing.dsp, result.timing.classification, result.timing.anomaly);
        ei_printf(": \n");
        for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
            ei_printf("    %s: %.5f\n", result.classification[ix].label,
                      result.classification[ix].value);
        }
#if EI_CLASSIFIER_HAS_ANOMALY == 1
        ei_printf("    anomaly score: %.3f\n", result.anomaly);
#endif

        print_results = 0;
    }
}

/**
 * @brief      Printf function uses vsnprintf and output using Arduino Serial
 *
 * @param[in]  format  Variable argument list
 */
void ei_printf(const char *format, ...)
{
    static char print_buf[1024] = { 0 };

    va_list args;
    va_start(args, format);
    int r = vsnprintf(print_buf, sizeof(print_buf), format, args);
    va_end(args);

    if (r > 0) {
        Serial.write(print_buf);
    }
}

/**
 * @brief      PDM buffer full callback
 *             Get data and call audio thread callback
 */
static void pdm_data_ready_inference_callback(void)
{
    int bytesAvailable = PDM.available();

    // read into the sample buffer
    int bytesRead = PDM.read((char *)&sampleBuffer[0], bytesAvailable);

    if (record_ready == true) {
        for (int i = 0; i < bytesRead >> 1; i++) {
            inference.buffers[inference.buf_select][inference.buf_count++] = sampleBuffer[i];

            if (inference.buf_count >= inference.n_samples) {
                inference.buf_select ^= 1;
                inference.buf_count = 0;
                inference.buf_ready = 1;
            }
        }
    }
}

/**
 * @brief      Init inferencing struct and setup/start PDM
 *
 * @param[in]  n_samples  The n samples
 *
 * @return     { description_of_the_return_value }
 */
static bool microphone_inference_start(uint32_t n_samples)
{
    inference.buffers[0] = (signed short *)malloc(n_samples * sizeof(signed short));

    if (inference.buffers[0] == NULL) {
        return false;
    }

    inference.buffers[1] = (signed short *)malloc(n_samples * sizeof(signed short));

    if (inference.buffers[1] == NULL) {
        free(inference.buffers[0]);
        return false;
    }

    sampleBuffer = (signed short *)malloc((n_samples >> 1) * sizeof(signed short));

    if (sampleBuffer == NULL) {
        free(inference.buffers[0]);
        free(inference.buffers[1]);
        return false;
    }

    inference.buf_select = 0;
    inference.buf_count = 0;
    inference.n_samples = n_samples;
    inference.buf_ready = 0;

    // configure the data receive callback
    PDM.onReceive(&pdm_data_ready_inference_callback);

    PDM.setBufferSize((n_samples >> 1) * sizeof(int16_t));

    // initialize PDM with:
    // - one channel (mono mode)
    // - a 16 kHz sample rate
    if (!PDM.begin(1, EI_CLASSIFIER_FREQUENCY)) {
        ei_printf("Failed to start PDM!");
    }

    // set the gain, defaults to 20
    PDM.setGain(127);

    record_ready = true;

    return true;
}

/**
 * @brief      Wait on new data
 *
 * @return     True when finished
 */
static bool microphone_inference_record(void)
{
    bool ret = true;

    if (inference.buf_ready == 1) {
        ei_printf(
            "Error sample buffer overrun. Decrease the number of slices per model window "
            "(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW)\n");
        ret = false;
    }

    while (inference.buf_ready == 0) {
        delay(1);
    }

    inference.buf_ready = 0;

    return ret;
}

/**
 * Get raw audio signal data
 */
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
    numpy::int16_to_float(&inference.buffers[inference.buf_select ^ 1][offset], out_ptr, length);

    return 0;
}

/**
 * @brief      Stop PDM and release buffers
 */
static void microphone_inference_end(void)
{
    PDM.end();
    free(inference.buffers[0]);
    free(inference.buffers[1]);
    free(sampleBuffer);
}

#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif