diff --git a/Makefile b/Makefile
index ca58afd..d474465 100644
--- a/Makefile
+++ b/Makefile
@@ -31,6 +31,9 @@ $(MODULES_PATH)/emlkmeans.mpy:
 $(MODULES_PATH)/eml_iir_q15.mpy:
 	make -C src/eml_iir_q15/ ARCH=$(ARCH) MPY_DIR=$(MPY_DIR_ABS) V=1 clean dist
 
+$(MODULES_PATH)/emlearn_arrayutils.mpy:
+	make -C src/emlearn_arrayutils/ ARCH=$(ARCH) MPY_DIR=$(MPY_DIR_ABS) V=1 clean dist
+
 emltrees.results: $(MODULES_PATH)/emltrees.mpy
 	MICROPYPATH=$(MODULES_PATH) $(MICROPYTHON_BIN) tests/test_trees.py
 
@@ -52,6 +55,9 @@ emlkmeans.results: $(MODULES_PATH)/emlkmeans.mpy
 eml_iir_q15.results: $(MODULES_PATH)/eml_iir_q15.mpy
 	MICROPYPATH=$(MODULES_PATH) $(MICROPYTHON_BIN) tests/test_iir_q15.py
 
+emlearn_arrayutils.results: $(MODULES_PATH)/emlearn_arrayutils.mpy
+	MICROPYPATH=$(MODULES_PATH) $(MICROPYTHON_BIN) tests/test_arrayutils.py
+
 .PHONY: clean
 
 clean:
@@ -68,8 +74,8 @@ release:
 	zip -r $(RELEASE_NAME).zip $(RELEASE_NAME)
 	#cp $(RELEASE_NAME).zip emlearn-micropython-latest.zip
 
-check: emltrees.results emlneighbors.results emliir.results eml_iir_q15.results emlfft.results emlkmeans.results tinymaix_cnn.results
+check: emltrees.results emlneighbors.results emliir.results eml_iir_q15.results emlfft.results emlkmeans.results emlearn_arrayutils.results tinymaix_cnn.results
 
-dist: $(MODULES_PATH)/emltrees.mpy $(MODULES_PATH)/emlneighbors.mpy $(MODULES_PATH)/emliir.mpy $(MODULES_PATH)/eml_iir_q15.mpy $(MODULES_PATH)/emlfft.mpy $(MODULES_PATH)/emlkmeans.mpy $(MODULES_PATH)/tinymaix_cnn.mpy
+dist: $(MODULES_PATH)/emltrees.mpy $(MODULES_PATH)/emlneighbors.mpy $(MODULES_PATH)/emliir.mpy $(MODULES_PATH)/eml_iir_q15.mpy $(MODULES_PATH)/emlfft.mpy $(MODULES_PATH)/emlkmeans.mpy $(MODULES_PATH)/emlearn_arrayutils.mpy $(MODULES_PATH)/tinymaix_cnn.mpy
diff --git a/README.md b/README.md
index b4c1364..4081351 100644
--- a/README.md
+++ b/README.md
@@ -39,6 +39,7 @@ It can be combined with feature preprocessing, including neural networks to addr
 - [xor_trees](./examples/xor_trees/). A "Hello World", using RandomForest.
 - [mnist_cnn](./examples/mnist_cnn/). Basic image classification, using Convolutional Neural Network.
 - [har_trees](./examples/har_trees/). Accelerometer-based Human Activity Recognition, using Random Forest
+- [soundlevel_iir](./examples/soundlevel_iir/). Sound Level Meter, using Infinite Impulse Response (IIR) filters.
 
 ## Prerequisites
diff --git a/examples/soundlevel_iir/README.md b/examples/soundlevel_iir/README.md
new file mode 100644
index 0000000..d5c6c76
--- /dev/null
+++ b/examples/soundlevel_iir/README.md
@@ -0,0 +1,80 @@
+
+# Sound level meter using Infinite Impulse Response (IIR) filters
+
+This is a sound level meter implemented in MicroPython with
+emlearn-micropython.
+It implements the standard processing used in sound level meters
+for noise measurements:
+A-weighting of the frequencies, and Fast (125 ms) or Slow (1 second)
+time integration, followed by computing the sound level in decibels.
+With Fast integration the measurement is known as LAF;
+with Slow integration, as LAS.
+
+## Hardware requirements
+
+The device must have an `I2S` microphone,
+and support the `machine.I2S` MicroPython module.
+It has been tested on an ESP32 device, namely the LilyGo T-Camera Mic v1.6.
+
+## Notes on measurement correctness
+
+NOTE: In order to get reasonable output values,
+the microphone sensitivity must be specified correctly.
+Ideally, also check/calibrate against a known-good sound level meter.
+
+NOTE: There is no compensation for non-linear frequency responses in the microphone.
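+
+For reference, the dB SPL conversion in `soundlevel.py` boils down to the
+following sketch, assuming the sensitivity is given as the dBFS level
+produced by a 94 dB SPL (1 Pa) tone; `mic_sensitivity_dbfs` and `rms`
+are illustrative inputs:
+
+```python
+import math
+
+spl_at_fullscale = 94 - mic_sensitivity_dbfs   # e.g. 94 - (-26) = 120 dB SPL
+level_db = 20*math.log10(rms/2**15) + spl_at_fullscale
+```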
+
+## Install requirements
+
+Make sure to have Python 3.10+ installed.
+
+Make sure to have the Unix port of MicroPython 1.23 set up.
+On Windows you can use Windows Subsystem for Linux (WSL), or Docker.
+
+Install the dependencies of this example:
+```console
+python -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+```
+
+## Running on host
+
+```console
+curl -o emliir.mpy https://github.com/emlearn/emlearn-micropython/raw/refs/heads/gh-pages/builds/master/x64_6.3/emliir.mpy
+curl -o emlearn_arrayutils.mpy https://github.com/emlearn/emlearn-micropython/raw/refs/heads/gh-pages/builds/master/x64_6.3/emlearn_arrayutils.mpy
+
+micropython soundlevel_test.py
+```
+
+## Running on device
+
+Make sure you have it running successfully on the host first.
+
+Flash your device with a standard MicroPython firmware,
+from the MicroPython.org downloads page.
+
+Download the native modules. The URLs below assume an ESP32
+(`xtensawin` architecture); adjust for your board:
+```console
+mkdir -p device
+curl -o device/emliir.mpy https://github.com/emlearn/emlearn-micropython/raw/refs/heads/gh-pages/builds/master/xtensawin_6.3/emliir.mpy
+curl -o device/emlearn_arrayutils.mpy https://github.com/emlearn/emlearn-micropython/raw/refs/heads/gh-pages/builds/master/xtensawin_6.3/emlearn_arrayutils.mpy
+```
+
+Copy the modules and code to the device, then run:
+```console
+mpremote cp device/emliir.mpy :
+mpremote cp device/emlearn_arrayutils.mpy :
+mpremote cp soundlevel.py :
+mpremote run soundlevel_run.py
+```
+
+## Running with a screen
+
+This requires hardware with an SSD1306 screen,
+in addition to an I2S microphone.
+It has been tested on the LilyGo T-Camera Mic v1.6.
+By adapting the pins, it will probably also work on the LilyGo T-Camera S3 v1.6.
+
+```console
+mpremote run soundlevel_screen.py
+```
+
diff --git a/examples/soundlevel_iir/color_setup.py b/examples/soundlevel_iir/color_setup.py
new file mode 100644
index 0000000..1be1423
--- /dev/null
+++ b/examples/soundlevel_iir/color_setup.py
@@ -0,0 +1,13 @@
+
+import gc
+from machine import I2C, Pin
+
+from drivers.ssd1306.ssd1306 import SSD1306_I2C as SSD
+
+# LilyGo T-Camera Mic
+i2c = I2C(scl=Pin(22), sda=Pin(21))
+
+oled_width = 128
+oled_height = 64
+gc.collect()  # Precaution before instantiating framebuf
+ssd = SSD(oled_width, oled_height, i2c)
diff --git a/examples/soundlevel_iir/iot_blynk.py b/examples/soundlevel_iir/iot_blynk.py
new file mode 100644
index 0000000..e335303
--- /dev/null
+++ b/examples/soundlevel_iir/iot_blynk.py
@@ -0,0 +1,95 @@
+
+import time
+import requests
+
+class BlynkClient():
+    """
+    Ref:
+    https://docs.blynk.io/en/blynk.cloud/device-https-api/upload-set-of-data-with-timestamps-api
+    """
+
+    def __init__(self,
+            token,
+            hostname='blynk.cloud',
+            protocol='https',
+            ):
+
+        self._telemetry_url = protocol + '://' + hostname + '/external/api/batch/update?token=' + token
+
+    def post_telemetry(self, values : list[dict[str, float]]):
+        """
+        Send multiple telemetry values.
+        The 'time' key should be in Unix milliseconds.
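+
+        Each entry is a dict with a 'time' key plus one or more
+        datastream values, e.g. {'time': 1722440000000, 'V1': 78.0}
+        (the datastream pin name 'V1' is illustrative).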
+ """ + stream_values = {} + + # Blynk HTTP API currently cannot send multiple timestamped values on multiple streams + # so we shuffle the data into being organized per-stream + for datapoint in values: + if 'time' in datapoint.keys(): + t = datapoint['time'] + for key, value in datapoint.items(): + if key == 'time': + continue + if key not in stream_values.keys(): + stream_values[key] = [] + stream_values[key].append((t, value)) + else: + print('WARNING: ignored datapoint without time') + + # NOTE: if no timestamps are provided (and there are multiple values) + # then regular batch update would be better, since it could be done in 1 request + # https://docs.blynk.io/en/blynk.cloud/device-https-api/update-multiple-datastreams-api + + for key, datapoints in stream_values.items(): + self.post_timestamped_values(key, datapoints) + + def post_timestamped_values(self, pin : str, values : list[tuple[int, float]]): + """ + Post multiple values from different times, for 1 stream + Each entry in values must be a tuple with (timestamp, value) + """ + + payload = values + url = self._telemetry_url+f'&pin={pin}' + r = requests.post(url, json=payload) + assert r.status_code == 200, (r.status_code, r.content) + +def unix_time_seconds(): + timestamp = time.time() + epoch_year = time.gmtime(0)[0] + + if epoch_year == 2020: + # seconds between 2000 (MicroPython epoch) and 1970 (Unix epoch) + epoch_difference = 946684800 + timestamp = timestamp + epoch_difference + elif epoch_year == 1970: + pass + else: + raise ValueError('Unknown epoch year') + + return float(timestamp) + +def main(): + # bc3ab311-4e92-11ef-b45a-8f71ad378839 + BLYNK_AUTH_TOKEN = 'Cxvp01Mvo2-A8er9mGRWLfHnPcTNvaTP' + api = BlynkClient(token=BLYNK_AUTH_TOKEN) + + t = int(unix_time_seconds() * 1000) + + values = [] + for s in range(0, 60, 10): + v = {'time': t-(s*1000), 'V1': 78.0+s} + values.append(v) + + api.post_telemetry(values) + print(values) + print('Posted telemetry') + +if __name__ == '__main__': + main() + + diff --git a/examples/soundlevel_iir/iot_thingsboard.py b/examples/soundlevel_iir/iot_thingsboard.py new file mode 100644 index 0000000..ad6a0b1 --- /dev/null +++ b/examples/soundlevel_iir/iot_thingsboard.py @@ -0,0 +1,71 @@ + +import time +import requests + +class ThingsBoard(): + """ + Ref: + https://thingsboard.io/docs/reference/http-api/ + """ + + def __init__(self, + token, + hostname='thingsboard.cloud', + protocol='https', + ): + + + self._telemetry_url = protocol + '://' + hostname + '/api/v1/' + token + '/telemetry' + + def post_telemetry(self, values : list[dict]): + """ + Send multiple telemetry values. + The 'time' key should be in Unix milliseconds. 
+ """ + + def encode_one(d): + o = { 'values': {} } + if 'time' in d: + o['ts'] = d['time'] + for k, v in d.items(): + if k == 'time': + continue + o['values'][k] = v + return o + + payload = [ encode_one(v) for v in values ] + + + r = requests.post(self._telemetry_url, json=payload) + assert r.status_code == 200, (r.status_code, r.content) + +def unix_time_seconds(): + timestamp = time.time() + epoch_year = time.gmtime(0)[0] + + if epoch_year == 2020: + # seconds between 2000 (MicroPython epoch) and 1970 (Unix epoch) + epoch_difference = 946684800 + timestamp = timestamp + epoch_difference + elif epoch_year == 1970: + pass + else: + raise ValueError('Unknown epoch year') + + return float(timestamp) + +# bc3ab311-4e92-11ef-b45a-8f71ad378839 +ACCESS_TOKEN = '7AiV0dXRPWKrxrLcI4wO' +api = ThingsBoard(token=ACCESS_TOKEN) + +t = int(unix_time_seconds() * 1000) + +values = [] +for s in range(0, 60, 10): + v = {'time': t-(s*1000), 'db2': 78.0+s, 'hex': 'ABCEDFE123122312452231DFABCEDF'} + values.append(v) + +api.post_telemetry(values) +print(values) +print('Posted telemetry') + diff --git a/examples/soundlevel_iir/soundlevel.py b/examples/soundlevel_iir/soundlevel.py new file mode 100644 index 0000000..ea35456 --- /dev/null +++ b/examples/soundlevel_iir/soundlevel.py @@ -0,0 +1,262 @@ + +import math +import struct +import array + +from iir_python import IIRFilter +import emliir +import eml_iir_q15 + +class IIRFilterEmlearn: + + def __init__(self, coefficients): + c = array.array('f', coefficients) + self.iir = emliir.new(c) + def process(self, samples): + self.iir.run(samples) + +class IIRFilterEmlearnFixed: + + def __init__(self, coefficients): + c = eml_iir_q15.convert_coefficients(coefficients) + self.iir = eml_iir_q15.new(c) + def process(self, samples): + self.iir.run(samples) + + +# A method for computing A weighting filters etc for any sample-rate +# https://www.dsprelated.com/thread/10546/a-weighting-filter + +def assert_array_typecode(arr, typecode): + actual_typecode = str(arr)[7:8] + assert actual_typecode == typecode, (actual_typecode, typecode) + +a_filter_16k = [ + 1.0383002230320646, 0.0, 0.0, 1.0, -0.016647242439959593, 6.928267021369795e-05, + 1.0, -2.0, 1.0, 1.0, -1.7070508390293027, 0.7174637059318595, + 1.0, -2.0, 1.0, 1.0, -1.9838868447331497, 0.9839517531763131 +] + +@micropython.native +def rms_micropython_native(arr): + acc = 0.0 + for i in range(len(arr)): + v = arr[i] + p = (v * v) + acc += p + mean = acc / len(arr) + out = math.sqrt(mean) + return out + +# Using a limited-precision aware approach based on Cumulative Moving Average +# https://www.codeproject.com/Articles/807195/Precise-and-safe-calculation-method-for-the-averag +@micropython.viper +def rms_micropython_viper(arr) -> object: + buf = ptr16(arr) # XXX: input MUST BE h/uint16 array + l = int(len(arr)) + cumulated_average : int = 0 + cumulated_remainder : int = 0 + addendum : int = 0 + n_values : int = 0 + for i in range(l): + v = int(buf[i]) + value = (v * v) # square it + n_values += 1 + addendum = value - cumulated_average + cumulated_remainder + cumulated_average += addendum // n_values + cumulated_remainder = addendum % n_values + + # sqrt it + out = math.sqrt(cumulated_average) + return out + +@micropython.native +def time_integrate_native(arr, initial, time_constant, samplerate): + acc = initial + dt = 1.0/samplerate + a = dt / (time_constant + dt) + #print('a', a) + + for i in range(len(arr)): + v = arr[i] + p = (v * v) # power is amplitude squared + # exponential time weighting aka 1st order low-pass 
+        acc = acc + a*(p - acc)
+
+    return acc
+
+# Use C module for data conversion
+# @micropython.native with a for loop is too slow
+from emlearn_arrayutils import linear_map
+def int16_to_float(inp, out):
+    return linear_map(inp, out, -2**15, 2**15, -1.0, 1.0)
+
+def float_to_int16(inp, out):
+    return linear_map(inp, out, -1.0, 1.0, -2**15, 2**15)
+
+class SoundlevelMeter():
+
+    def __init__(self, buffer_size,
+            samplerate,
+            mic_sensitivity,
+            time_integration=0.125,
+            frequency_weighting='A',
+            ):
+
+        self._buffer_size = buffer_size
+        self._sensitivity_dbfs = mic_sensitivity
+
+        buffer_duration = buffer_size / samplerate
+        assert buffer_duration <= 0.125
+
+        self._power_integrated_fast = 0.0
+        self._samplerate = samplerate
+        self._time_integration = time_integration
+
+        if not frequency_weighting:
+            self.frequency_filter = None
+        elif frequency_weighting == 'A':
+            # alternatives: IIRFilter (pure Python), IIRFilterEmlearnFixed (q15)
+            self.frequency_filter = IIRFilterEmlearn(a_filter_16k)
+
+            self.float_array = array.array('f', (0 for _ in range(buffer_size)))
+        else:
+            raise ValueError('Unsupported frequency_weighting')
+
+    def process(self, samples):
+        assert len(samples) == self._buffer_size
+        assert_array_typecode(samples, 'h')
+
+        # Apply frequency weighting
+        if self.frequency_filter:
+            assert len(self.float_array) == self._buffer_size
+            int16_to_float(samples, self.float_array)
+            self.frequency_filter.process(self.float_array)
+            float_to_int16(self.float_array, samples)
+
+        spl_max = 94 - self._sensitivity_dbfs
+        ref = 2**15
+
+        # no integration - "linear"
+        if self._time_integration is None:
+            rms = rms_micropython_native(samples)
+            # alternative: rms = rms_micropython_viper(samples)
+        else:
+            p = time_integrate_native(samples,
+                self._power_integrated_fast,
+                self._time_integration,
+                self._samplerate,
+            )
+            self._power_integrated_fast = p
+            rms = math.sqrt(p)
+
+        level = 20*math.log10(rms/ref)
+        level += (spl_max)
+
+        return level
+
+def read_wave_header(wav_file):
+    # based on https://github.com/miketeachman/micropython-i2s-examples/blob/master/examples/wavplayer.py
+    # Copyright (c) 2022 Mike Teachman
+
+    chunk_ID = wav_file.read(4)
+    if chunk_ID != b"RIFF":
+        raise ValueError("WAV chunk ID invalid")
+    chunk_size = wav_file.read(4)
+    format = wav_file.read(4)
+    if format != b"WAVE":
+        raise ValueError("WAV format invalid")
+    sub_chunk1_ID = wav_file.read(4)
+    if sub_chunk1_ID != b"fmt ":
+        raise ValueError("WAV sub chunk 1 ID invalid")
+    sub_chunk1_size = wav_file.read(4)
+    audio_format = struct.unpack("<H", wav_file.read(2))[0]
+    num_channels = struct.unpack("<H", wav_file.read(2))[0]
+    sample_rate = struct.unpack("<I", wav_file.read(4))[0]
+    byte_rate = struct.unpack("<I", wav_file.read(4))[0]
+    block_align = struct.unpack("<H", wav_file.read(2))[0]
+    bits_per_sample = struct.unpack("<H", wav_file.read(2))[0]
+
+    # usually the sub chunk2 ID ("data") comes next, but
+    # some online MP3->WAV converters add
+    # binary data before "data". So, read a fairly large
+    # block of bytes and search for "data".
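+    # (this assumes "data" occurs within the 200 bytes read below)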
+    binary_block = wav_file.read(200)
+    offset = binary_block.find(b"data")
+    if offset == -1:
+        raise ValueError("WAV sub chunk 2 ID not found")
+
+    first_sample_offset = 44 + offset
+
+    return num_channels, sample_rate, bits_per_sample, first_sample_offset
+
+def int16_from_bytes(buf, endianness='<'):
+
+    fmt = endianness + 'h'
+    gen = (struct.unpack(fmt, buf[i:i+2])[0] for i in range(0, len(buf), 2))
+    arr = array.array('h', gen)
+    assert len(arr) == len(buf)//2
+    return arr
+
+def read_wav(file, samplerate, frames):
+    """
+    Generator that yields chunks of int16 samples from a mono 16-bit WAV file
+    """
+
+    file_channels, file_sr, file_bitdepth, data_offset = read_wave_header(file)
+    assert samplerate == file_sr, (samplerate, file_sr)
+    # only int16 mono is supported
+    assert file_channels == 1, file_channels
+    assert file_bitdepth == 16, file_bitdepth
+
+    file.seek(data_offset)
+
+    while True:
+        data = file.read(2*frames)
+        if not data:
+            return
+        samples = int16_from_bytes(data)
+        yield samples
+
+def test_soundlevel():
+
+    SR = 16000
+    chunk_size = 128
+    mic_dbfs = -25
+    fast_meter = SoundlevelMeter(chunk_size, SR, mic_dbfs, time_integration=0.125)
+    linear_meter = SoundlevelMeter(chunk_size, SR, mic_dbfs, time_integration=None)
+
+    with open('out.csv', 'w') as out:
+        with open('test_burst.wav', 'rb') as f:
+            t = 0.0
+            for chunk in read_wav(f, SR, frames=chunk_size):
+                lf = fast_meter.process(chunk)
+                level = linear_meter.process(chunk)
+
+                t += ( len(chunk)/SR )
+
+                # columns: time, linear level, fast (LAF) level
+                line = '{0:.3f},{1:.1f},{2:.1f}'.format(t, level, lf)
+                print(line)
+                out.write(line+'\n')
+
+if __name__ == '__main__':
+    test_soundlevel()
+
diff --git a/examples/soundlevel_iir/soundlevel_screen.py b/examples/soundlevel_iir/soundlevel_screen.py
new file mode 100644
index 0000000..21f1bcd
--- /dev/null
+++ b/examples/soundlevel_iir/soundlevel_screen.py
@@ -0,0 +1,153 @@
+
+import math
+import gc
+import time
+import random
+import struct
+import array
+import os
+
+from machine import I2S, I2C, Pin, SoftI2C
+
+from soundlevel import SoundlevelMeter
+
+# mpremote mip install "github:peterhinch/micropython-nano-gui/drivers/ssd1306"
+from color_setup import ssd
+
+# mpremote mip install "github:peterhinch/micropython-nano-gui"
+# On a monochrome display Writer is more efficient than CWriter.
+from gui.core.writer import Writer
+from gui.core.nanogui import refresh
+from gui.widgets.meter import Meter
+from gui.widgets.label import Label
+
+# Fonts
+import gui.fonts.courier20 as fixed
+import gui.fonts.font6 as small
+
+def render_display(db : float):
+    start_time = time.ticks_ms()
+
+    ssd.fill(0)
+
+    Writer.set_textpos(ssd, 0, 0)  # In case previous tests have altered it
+    wri = Writer(ssd, fixed, verbose=False)
+    wri.set_clip(False, False, False)
+
+    warn_text = 'Loud!'
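+    # Label(writer, row, col, width): sizing the fields by the widest
+    # expected string keeps later updates from leaving stale pixels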
+    numfield = Label(wri, 5, 0, wri.stringlen('99.9'))
+    textfield = Label(wri, 40, 34, wri.stringlen(warn_text))
+
+    numfield.value('{:5.1f} dBA'.format(db))
+
+    if db > 75.0:
+        textfield.value(warn_text, True)
+    else:
+        textfield.value('')
+
+    refresh(ssd)
+
+    duration = time.ticks_ms() - start_time
+    print('render-display-done', duration)
+
+def flip_display(ssd, vertical=False):
+    """
+    Vertical flip for SSD1306
+    """
+
+    SEGREMAP = 0xA0
+    COMSCANINC = 0xC0
+    COMSCANDEC = 0xC8
+
+    if vertical:
+        ssd.write_cmd(SEGREMAP | 0x01)
+        ssd.write_cmd(COMSCANDEC)
+    else:
+        ssd.write_cmd(SEGREMAP)
+        ssd.write_cmd(COMSCANINC)
+
+AUDIO_BUFFER_LENGTH = 40000
+AUDIO_BITDEPTH = 16
+AUDIO_FORMAT = I2S.MONO
+AUDIO_SAMPLERATE = 16000
+
+SCK_PIN = 26
+WS_PIN = 32
+SD_PIN = 33
+
+audio_in = I2S(0,
+    sck=Pin(SCK_PIN),
+    ws=Pin(WS_PIN),
+    sd=Pin(SD_PIN),
+    mode=I2S.RX,
+    bits=AUDIO_BITDEPTH,
+    format=AUDIO_FORMAT,
+    rate=AUDIO_SAMPLERATE,
+    ibuf=AUDIO_BUFFER_LENGTH,
+)
+
+# allocate sample arrays
+chunk_samples = int(AUDIO_SAMPLERATE * 0.125)
+mic_samples = array.array('h', (0 for _ in range(chunk_samples)))  # int16
+# memoryview used to reduce heap allocation in while loop
+mic_samples_mv = memoryview(mic_samples)
+
+soundlevel_db = 0.0
+next_display_update = 0.0
+
+MIC_DBFS = -26  # MSM261S4030H0R
+
+meter = SoundlevelMeter(buffer_size=chunk_samples,
+    samplerate=AUDIO_SAMPLERATE,
+    mic_sensitivity=MIC_DBFS,
+    time_integration=0.125,
+    frequency_weighting='A',
+)
+
+def audio_ready_callback(arg):
+    global soundlevel_db
+    start_time = time.ticks_ms()
+
+    db = meter.process(mic_samples)
+    soundlevel_db = db
+
+    duration = time.ticks_diff(time.ticks_ms(), start_time)
+
+    print('audio-ready', time.ticks_ms(), db, duration)
+
+    # re-trigger audio
+    num_read = audio_in.readinto(mic_samples_mv)
+
+def main():
+    global next_display_update
+
+    flip_display(ssd, vertical=False)
+
+    # setting a callback function makes the readinto() method non-blocking
+    audio_in.irq(audio_ready_callback)
+
+    # Start microphone readout. The callback will re-trigger it
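+    # (each callback invocation processes one chunk_samples chunk, 125 ms of audio)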
+    num_read = audio_in.readinto(mic_samples_mv)
+    print('audio-start', num_read)
+
+    while True:
+        if time.time() >= next_display_update:
+            render_display(db=soundlevel_db)
+            next_display_update = time.time() + 0.100
+
+        print('main-loop-iter', soundlevel_db)
+        time.sleep_ms(10)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/src/emlearn_arrayutils/Makefile b/src/emlearn_arrayutils/Makefile
new file mode 100644
index 0000000..f125341
--- /dev/null
+++ b/src/emlearn_arrayutils/Makefile
@@ -0,0 +1,69 @@
+# Location of top-level MicroPython directory
+MPY_DIR = ../../micropython
+
+# Architecture to build for (x86, x64, armv6m, armv7m, xtensa, xtensawin)
+ARCH = x64
+
+# The ABI version for .mpy files
+MPY_ABI_VERSION := 6.3
+
+DIST_DIR := ../../dist/$(ARCH)_$(MPY_ABI_VERSION)
+
+# Name of module
+MOD = emlearn_arrayutils
+
+# Source files (.c or .py)
+SRC = modarrayutils.c
+
+# Stuff to make soft-float work
+# If symbols are undefined, use tools/find_symbols.py to locate object files to add
+SOFTFP_O :=
+SOFTFP_ENABLE := 0
+
+ARM_SOFTFP_O := _arm_cmpsf2.o lesf2.o divsf3.o _thumb1_case_uqi.o _arm_fixsfsi.o floatsisf.o fixsfsi.o eqsf2.o gesf2.o addsf3.o mulsf3.o subsf3.o _clzsi2.o _udivsi3.o
+ARM_SOFTFP_O += _arm_muldivsf3.o _arm_addsubsf3.o
+
+ifeq ($(ARCH), xtensawin)
+    SOFTFP_O += _divsf3.o _ashrdi3.o
+    SOFTFP_ENABLE=1
+endif
+
+ifeq ($(ARCH), armv6m)
+    SOFTFP_O += $(ARM_SOFTFP_O)
+    SOFTFP_ENABLE=1
+endif
+ifeq ($(ARCH), armv7m)
+    SOFTFP_O += $(ARM_SOFTFP_O)
+    SOFTFP_ENABLE=1
+endif
+
+ifeq ($(SOFTFP_ENABLE), 1)
+    SRC_O += $(SOFTFP_O)
+    #CLEAN_EXTRA += $(SOFTFP_O)
+endif
+
+# Releases
+DIST_FILE = $(DIST_DIR)/$(MOD).mpy
+$(DIST_DIR):
+	mkdir -p $@
+
+$(DIST_FILE): $(MOD).mpy $(DIST_DIR)
+	cp $< $@
+
+# Include to get the rules for compiling and linking the module
+include $(MPY_DIR)/py/dynruntime.mk
+
+# CROSS is defined by the included dynruntime.mk
+LIBGCC_FILENAME = $(shell $(CROSS)gcc $(CFLAGS) -print-libgcc-file-name)
+$(info $(LIBGCC_FILENAME))
+
+_arm_cmpsf2.o:
+	$(CROSS)ar -x $(LIBGCC_FILENAME) $(SOFTFP_O)
+
+_divsf3.o:
+	$(CROSS)ar -x $(LIBGCC_FILENAME) $(SOFTFP_O)
+
+dist: $(DIST_FILE)
diff --git a/src/emlearn_arrayutils/modarrayutils.c b/src/emlearn_arrayutils/modarrayutils.c
new file mode 100644
index 0000000..f6f2861
--- /dev/null
+++ b/src/emlearn_arrayutils/modarrayutils.c
@@ -0,0 +1,139 @@
+// Include the header file to get access to the MicroPython API
+#include "py/dynruntime.h"
+
+#include <string.h>
+
+// memcpy/memset are used by some standard C constructs
+#if !defined(__linux__)
+void *memcpy(void *dst, const void *src, size_t n) {
+    return mp_fun_table.memmove_(dst, src, n);
+}
+void *memset(void *s, int c, size_t n) {
+    return mp_fun_table.memset_(s, c, n);
+}
+
+void NORETURN abort() {
+    while (1) {
+        ;
+    }
+}
+
+int
+__aeabi_idiv0(int return_value) {
+    return return_value;
+}
+
+long long
+__aeabi_ldiv0(long long return_value) {
+    return return_value;
+}
+
+#endif
+
+#define max(a,b) \
+({ \
+    __typeof__ (a) _a = (a); \
+    __typeof__ (b) _b = (b); \
+    _a > _b ? _a : _b; \
+})
+
+#define min(a,b) \
+({ \
+    __typeof__ (a) _a = (a); \
+    __typeof__ (b) _b = (b); \
+    _a < _b ? _a : _b; \
+})
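+
+// Example: map_linear(16384.0f, -32768.0f, 32768.0f, -1.0f, 1.0f) == 0.5f,
+// with the output clamped to stay within [out_min, out_max]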
+
+float
+map_linear(float x, float in_min, float in_max, float out_min, float out_max) {
+    const float out = ((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min);
+    return max(out_min, min(out, out_max));
+}
+
+int get_typecode_size(char typecode)
+{
+    // TODO: support int8 also
+    if (typecode == 'h') {
+        return 2;
+    } else if (typecode == 'f') {
+        return 4;
+    } else {
+        // unsupported
+        return 0;
+    }
+}
+
+/* linear_map
+
+Map the elements of an array linearly from one range of values to another range.
+Also accepts an output array, which may be of a different type than the input.
+
+See map() in Arduino https://www.arduino.cc/reference/en/language/functions/math/map/
+ */
+static mp_obj_t
+arrayutils_linear_map(size_t n_args, const mp_obj_t *args) {
+
+    // Extract arguments
+    mp_buffer_info_t in_bufinfo;
+    mp_get_buffer_raise(args[0], &in_bufinfo, MP_BUFFER_RW);
+    mp_buffer_info_t out_bufinfo;
+    mp_get_buffer_raise(args[1], &out_bufinfo, MP_BUFFER_RW);
+    const float in_min = mp_obj_get_float_to_f(args[2]);
+    const float in_max = mp_obj_get_float_to_f(args[3]);
+    const float out_min = mp_obj_get_float_to_f(args[4]);
+    const float out_max = mp_obj_get_float_to_f(args[5]);
+
+    // Check types and lengths
+    const int in_size = get_typecode_size(in_bufinfo.typecode);
+    const int out_size = get_typecode_size(out_bufinfo.typecode);
+    if (in_size == 0 || out_size == 0) {
+        mp_raise_ValueError(MP_ERROR_TEXT("unsupported array typecode"));
+    }
+    const int in_length = in_bufinfo.len / in_size;
+    const int out_length = out_bufinfo.len / out_size;
+    if (in_length != out_length) {
+        mp_raise_ValueError(MP_ERROR_TEXT("length mismatch"));
+    }
+
+    // Actually perform the conversions
+    const char in_type = in_bufinfo.typecode;
+    const char out_type = out_bufinfo.typecode;
+    // FIXME: support f/f and h/h
+    if (in_type == 'h' && out_type == 'f') {
+        const int16_t *in = in_bufinfo.buf;
+        float *out = out_bufinfo.buf;
+        for (int i=0; i<in_length; i++) {
+            out[i] = map_linear(in[i], in_min, in_max, out_min, out_max);
+        }
+    } else if (in_type == 'f' && out_type == 'h') {
+        const float *in = in_bufinfo.buf;
+        int16_t *out = out_bufinfo.buf;
+        for (int i=0; i<in_length; i++) {
+            out[i] = (int16_t)map_linear(in[i], in_min, in_max, out_min, out_max);
+        }
+    } else {
+        mp_raise_ValueError(MP_ERROR_TEXT("unsupported type combination"));
+    }
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(arrayutils_linear_map_obj, 6, 6, arrayutils_linear_map);
+
+// This is the entry point and is called when the module is imported
+mp_obj_t mpy_init(mp_obj_fun_bc_t *self, size_t n_args, size_t n_kw, mp_obj_t *args) {
+    // This must be first, it sets up the globals dict and other things
+    MP_DYNRUNTIME_INIT_ENTRY
+
+    mp_store_global(MP_QSTR_linear_map, MP_OBJ_FROM_PTR(&arrayutils_linear_map_obj));
+
+    // This must be last, it restores the globals dict
+    MP_DYNRUNTIME_INIT_EXIT
+}
diff --git a/tests/test_arrayutils.py b/tests/test_arrayutils.py
new file mode 100644
--- /dev/null
+++ b/tests/test_arrayutils.py
+
+import array
+import time
+
+from emlearn_arrayutils import linear_map
+
+def assert_almost_equal(a, b, rtol=0.001, errortol=0):
+    assert len(a) == len(b), (len(a), len(b))
+
+    errors = 0
+    for i in range(len(a)):
+        # relative to the int16 full-scale range
+        rel = abs(a[i] - b[i]) / (2**15)
+        if rel > rtol:
+            errors += 1
+
+    assert errors <= errortol, (errors)
+
+def test_arrayutils_linear_map_float_int16():
+
+    length = 2000
+    int16 = array.array('h', (i-length//2 for i in range(length)))
+    flt = array.array('f', (0 for _ in range(length)))
+    out = array.array('h', (0 for _ in range(length)))
+
+    # roundtrip should be (approx) equal
+    start = time.ticks_us()
+    linear_map(int16, flt, -2**15, 2**15, -1.0, 1.0)
+    linear_map(flt, out, -1.0, 1.0, -2**15, 2**15)
+    d = time.ticks_diff(time.ticks_us(), start)
+    print('linear_map int16>float>int16', length, d/1000.0)
+    assert_almost_equal(out, int16)
+
+test_arrayutils_linear_map_float_int16()
+
+# Other functionality to test:
+# reinterpret
+# incorrect lengths, should raise
+# roundtrip should be approx equal
+# bytearray <-> int16 LE / BE
+
+# initialize filled
+#arr = make_filled(length, value)
+#length should be correct
+#each element should be equal to value
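+
+# A sketch for the 'incorrect lengths' case above (assumes the ValueError
+# raised on length mismatch in modarrayutils.c; not called by default):
+def test_arrayutils_linear_map_length_mismatch():
+    a = array.array('h', (0 for _ in range(10)))
+    b = array.array('f', (0 for _ in range(5)))
+    try:
+        linear_map(a, b, -2**15, 2**15, -1.0, 1.0)
+    except ValueError:
+        pass
+    else:
+        raise AssertionError('expected ValueError')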