diff --git a/.gitignore b/.gitignore old mode 100755 new mode 100644 index c58d722..e0754a7 --- a/.gitignore +++ b/.gitignore @@ -111,3 +111,4 @@ venv.bak/ .* *.json data/* +/v/ diff --git a/README.md b/README.md index bea11cf..c5637e5 100644 --- a/README.md +++ b/README.md @@ -1,56 +1,60 @@ -# Project status March 27th +# What is omnipy? -## Good news everyone! -*Omnipy v1.3 coming soon** +![rileylink android raspberrypi](https://github.com/winemug/omnipy/raw/master/img/droidrlpi.jpg) -Development and testing is going forward now that we've pods for testing. Thanks to all our contributors on the [slack channel](https://join.slack.com/t/omnicore-pdm/shared_invite/enQtNTY0ODcyOTA0ODcwLTNiMDc2OTE5MDk4Yjk0MDZlNDY1MmViMDkyZGYxZmQ2NWIwNDVhMmM0NTM1ZTM4MDdlYjFjNjBmZTRlYzllMmY) who have been sending pods, helping with coding and documentation, providing feedback and spreading the word. +Omnipy is a set of hardware and software components that allows users of the Omnipod insulin pump to automate insulin delivery using [Android APS](https://androidaps.readthedocs.io/en/latest/EN/). Android APS is a long established artificial pancreas system with support for many different insulin pumps, but does not (yet) officially feature Omnipod support. Omnipy bridges this gap and brings Omnipod to Android APS. -![slackers](https://raw.githubusercontent.com/winemug/omnipy/master/img/slackers.png) +Omnipy evolved from my initial efforts on working with OmniPod and is released to public as of February 2019. It has grown over time with support and feedback from the community. -Here's what to look forward to in this (last) feature update for omnipy: +Read the [introduction](https://github.com/winemug/omnipy/wiki) page on the wiki for more information. 
-- All remaining PDM features: Pod activation, basal rate adjustments, -- Android APS: Messages, warnings, status updates from omnipy and a functional UI for PDM -- Overall connection stability improvements in all areas: Pod, Rileylink and omnipy -- An easier way to set up omnipy with raspberry pi and Android APS on your phone +## November 25th, 2019 +## Important update regarding Android APS version updates +As of today, omnipy users of Android APS have started receiving a warning message in the application about a grace period for certain features shutting down - unless the application is upgraded. -*Watch out on this page for release announcement and links. For questions, updates and support join us in the [omnicore-pdm slack via this invite link](https://join.slack.com/t/omnicore-pdm/shared_invite/enQtNTY0ODcyOTA0ODcwLTNiMDc2OTE5MDk4Yjk0MDZlNDY1MmViMDkyZGYxZmQ2NWIwNDVhMmM0NTM1ZTM4MDdlYjFjNjBmZTRlYzllMmY). +Since omnipy is no longer receiving feature updates, the current Android APS version 2.3 for omnipy will not be upgraded as long as there is no critical issue that affects omnipy users. -**March 17th - omnipy v1.2 released** +Omnipy users are strongly encouraged to follow announcements of the [OmniCore](https://github.com/winemug/OmniCore) project and switch to the first public release as soon as it is made available. -This release fixes an issue with the AAPS client incorrectly registering failed commands as succeeded. For all changes, see [release notes](https://github.com/winemug/omnipy/wiki/Release-Notes) on the wiki for more information. +If you are unable to make the transition to OmniCore before January 1st 2020, please build and compile [this release of Android APS for omnipy](https://github.com/winemug/AndroidAPS/releases/tag/omnipy_v1.4.3_aaps_v2.3.0_build_3) in order to prevent being shut off. -Also see [upgrading](https://github.com/winemug/omnipy/wiki/Upgrading-Software) if you are running a previous version. 
+### May 12th, Update v1.4.3 available (while stocks last!) -# Wiki Links +https://github.com/winemug/omnipy/releases/tag/v1.4.3 -[Setup](https://github.com/winemug/omnipy/wiki/Setup-and-Configuration) documentation for omnipy +This update fixes various issues from previous 1.4.x releases. Upgrading via image installation is strongly recommended. -[Requirements](https://github.com/winemug/omnipy/wiki/Requirements) +See [Release Notes](https://github.com/winemug/omnipy/wiki/Release-Notes) for more information on what's new and what's changed in this release. -[Setup](https://github.com/winemug/omnipy/wiki/Setup-and-Configuration) +See also [Tips & Tricks](https://github.com/winemug/omnipy/wiki/Tips-and-Tricks) with respect to communication stability. -[F.A.Q.](https://github.com/winemug/omnipy/wiki/Frequently-Asked-Questions) +Please refer to the [Setup documentation](https://github.com/winemug/omnipy/wiki/Setup-and-Configuration) on the wiki for information on how to set up the latest version of omnipy. -[Support](https://github.com/winemug/omnipy/wiki/Support) +## Important information from the author -# About -omnipy is a PDM (personal diabetes manager) emulator for the OmniPod insulin pump and it can be used to command the pump to perform various functions over a Raspberry Pi on a local network. It exposes a basic HTTP API to be utilized by APS systems, and currently integrates with Android APS via a [custom fork](https://github.com/winemug/omnipy/wiki/AndroidAPS-Setup) of Android APS v2.x +As of omnipy v1.4 release, **all development activities on omnipy is stopped**. The project is considered feature-complete and stable enough for general use. -![rileylink android raspberrypi](https://github.com/winemug/omnipy/raw/master/img/droidrlpi.jpg) +Unless a major issue is encountered, there will be no fixes or updates to the current release. Support will still be available in the same form as it has been until today. 
If anyone is interested in taking the project further in its current form, [get in touch](mailto:barisk@gmail.com). -# Important Background Information -This used to be a pet project, where I investigated the radio communication between the OmniPod and the PDM. Whilst still studying the OmniPod, I have decided that there was enough information available to let it execute basic commands which would make it usable in an artifical pancreas system. I've put together a prototype and integrated it into AndroidAPS for our own use, which became what it is today. +## Next up: OmniCore -As a father of a child with Type I diabetes, I desperately needed something until there was a "proper" solution, so this piece of software became indispensible, albeit its design issues and lack of user-friendliness. +I will focus all development efforts related to Omnipod and Omnipod Dash on [**OmniCore**](https://github.com/winemug/OmniCore), an OmniPod focused software product with a broader scope involving multiple platforms, hardware components and surprise features. -You are welcome to test it and report issues, but be aware you are doing this on your **own risk** ~~and so far it has been tested by **only two people**.~~ Initially tested off-body and on non-t1d volunteers, my son has been using this as a closed loop since November 2018. Since then it has evolved from a raspberry pi with a usb stick (rfcat) to raspberry pi with the RileyLink and Android APS and made gradually available for testing to the general public earlier in March 2019. It's now being tested by more and more people and core functionality has so far shown itself to be stable for a looping setup. +Today, I'm excited to announce that the very first milestone of OmniCore is going to _replace omnipy completely_ to address one particular complaint all omnipy users (including myself) have had so far: Having to carry a raspberry pi and a power supply. -# What's next? 
+Keep an eye on the [OmniCore](https://github.com/winemug/OmniCore) github page. -This was intended to be a throw-away prototype and I ~~want~~try to keep it that way. The raspberry pi and android are redundant, as both have enough processing power to perform the operations. My focus on Omnipod related development will shift on to the [OmniCore](https://github.com/winemug/OmniCore) project, which will be ready for public testing by ~~mid~~ ~~late March~~ sometime after the final feature update of omnipy. +## Join us -In the mean time, please do report any issues you find so they can be addressed. +For questions, updates and support join us in the [omnicore-pdm slack](https://join.slack.com/t/omnicore-pdm/shared_invite/enQtODM0MzAxMDkzNzI5LWQ5MjEwNWNhOGNlZDI1YTcxNDhmYmNjNDE3YTU2MWY3OGNkMzZlMTc5MTFhYmI5MDBjNTk5YmQ1NGRhZGNlZDM). + +# Wiki Links + +[Requirements](https://github.com/winemug/omnipy/wiki/Requirements) + +[Setup](https://github.com/winemug/omnipy/wiki/Setup-and-Configuration) + +[F.A.Q.](https://github.com/winemug/omnipy/wiki/Frequently-Asked-Questions) + +[Support](https://github.com/winemug/omnipy/wiki/Support) -# Information on RileyLink "433" -It seems that the release announcement of [RileyLink433](https://getrileylink.org/product/rileylink433/) got people excited about OmniPod loopability. For clarification: RL 433 is **not** an absolute requirement. If you have the old RileyLink, it will still work - however in a _very_ limited range. It's strongly suggested to change the antenna, for which purpose RileyLink also provides an antenna upgrade kit. Please see the [requirements](https://github.com/winemug/omnipy/wiki/Requirements) section in the wiki and [Increasing Radio Range](https://github.com/winemug/omnipy/wiki/Increasing-Radio-Range) for what you can further do with your RileyLink. 
diff --git a/batt_check.py b/batt_check.py new file mode 100644 index 0000000..a26b395 --- /dev/null +++ b/batt_check.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +# Dan Evans 23-29/4/19b, based on script by Alex Eames +# NB THIS VERSION FOR DEPLOYMENT + +# nb at setup need : sudo apt-get install python3-rpi.gpio + +import time +import RPi.GPIO as GPIO +from threading import Thread, RLock + + +class SpiBatteryVoltageChecker: + def __init__(self): + # Hardware setup + self.adcs = [0] # voltage divider connected to channel 0 of mcp3002 + self.cutoff = 3 # low battery cutoff (when LipoShim shuts dowwn) + self.maxvolts = 4.2 # max voltage for the battery, equivalent to 100% charge + self.vref = 3.3 # vref of the ADC + self.res1 = 180 # resistor connected to VBATT (/1000) + self.res2 = 100 # resistor connected to GND (/1000) + self.reps = 10 # how many times to take each measurement for averaging + self.pcround = 1 # round % battery to nearest + + # Define Pins/Ports on ADC + self.SPICLK = 16 + self.SPIMISO = 20 + self.SPIMOSI = 21 + self.SPICS = 13 + + self.battery_level = -1 + self.adc_readings = [] + self.sync_lock = RLock() + + try: + # Set up set up GPIO & SPI interface pins + GPIO.setwarnings(False) + GPIO.setmode(GPIO.BCM) + GPIO.setup(self.SPIMOSI, GPIO.OUT) + GPIO.setup(self.SPIMISO, GPIO.IN) + GPIO.setup(self.SPICLK, GPIO.OUT) + GPIO.setup(self.SPICS, GPIO.OUT) + + # initial reading to determine availability + average = 0 + for i in range(0, 10): + average = self._get_moving_average() + + bp = self._get_percentage(average) + if bp <= 0.01: + print("spi reader not detected") + GPIO.cleanup() + else: + self.battery_level = bp + self.service_thread = Thread(target=self._service_loop) + self.service_thread.setDaemon(True) + self.service_thread.start() + except: + print("Failed to set up GPIO pins for battery level reading") + GPIO.cleanup() + + def get_measurement(self): + with self.sync_lock: + return self.battery_level + + def _service_loop(self): + while True: 
+ time.sleep(60) + try: + with self.sync_lock: + self.battery_level = self._get_percentage(self._get_moving_average()) + except: + pass + + # ADC code based on an adafruit example for mcp3008 + def _readadc(self, adcnum, clockpin, mosipin, misopin, cspin): + if ((adcnum > 1) or (adcnum < 0)): + return -1 + if (adcnum == 0): + commandout = 0x6 + else: + commandout = 0x7 + + GPIO.output(cspin, True) + GPIO.output(clockpin, False) # start clock low + GPIO.output(cspin, False) # bring CS low + + commandout <<= 5 # we only need to send 3 bits here + for i in range(3): + if (commandout & 0x80): + GPIO.output(mosipin, True) + else: + GPIO.output(mosipin, False) + commandout <<= 1 + GPIO.output(clockpin, True) + GPIO.output(clockpin, False) + + adcout = 0 + # read in one empty bit, one null bit and 10 ADC bits + for i in range(12): + GPIO.output(clockpin, True) + GPIO.output(clockpin, False) + adcout <<= 1 + if (GPIO.input(misopin)): + adcout |= 0x1 + + GPIO.output(cspin, True) + + adcout /= 2 # first bit is 'null' so drop it + return adcout + + def _get_moving_average(self): + self.adc_readings.append(self._get_adc_reading()) + reading_count = len(self.adc_readings) + if reading_count > self.reps: + self.adc_readings = self.adc_readings[reading_count-self.reps:reading_count] + return float(sum(self.adc_readings) / len(self.adc_readings)) + + def _get_adc_reading(self): + adc_sum = 0 + for adcnum in self.adcs: + try: + adc_sum += self._readadc(adcnum, self.SPICLK, self.SPIMOSI, self.SPIMISO, self.SPICS) + except: + print("Error reading adc value") + time.sleep(0.05) + return float(adc_sum / len(self.adcs)) + + def _get_percentage(self, adc_reading): + # convert analogue reading to volts and %, accounting for vref and setup of resistor bridge + volts = adc_reading * ( self.vref / 1024 ) * (self.res1 + self.res2) / self.res2 + voltspc = int ( 100 * ( volts - self.cutoff ) / ( self.maxvolts - self.cutoff ) ) + voltspcround = self.pcround * round( voltspc / self.pcround ) + if 
(voltspcround > 100): + voltspcround = 100 + if (voltspcround < 0): + voltspcround = 0 + return voltspcround + + +if __name__ == '__main__': + sbc = SpiBatteryVoltageChecker() + while True: + try: + print("Battery is now at %d percent" % sbc.get_measurement()) + time.sleep(10) + except KeyboardInterrupt: + break diff --git a/cc1110-old.json b/cc1110-old.json new file mode 100644 index 0000000..30b17a7 --- /dev/null +++ b/cc1110-old.json @@ -0,0 +1,41 @@ +{ + "common": + { + "SYNC1": "0xA5", + "SYNC0": "0x5A", + "PKTLEN": "0x50", + "PKTCTRL1": "0x20", + "PKTCTRL0": "0x00", + "ADDR": "0x00", + "CHANNR": "0x00", + "FSCTRL1": "0x0F", + "FSCTRL0": "0x00", + "FREQ2": "0x12", + "FREQ1": "0x14", + "FREQ0": "0x46", + "MDMCFG4": "0xCA", + "MDMCFG3": "0xBC", + "MDMCFG2": "0x02", + "MDMCFG1": "0x40", + "MDMCFG0": "0x11", + "DEVIATN": "0x54", + "MCSM2": "0x07", + "MCSM1": "0x30", + "MCSM0": "0x19", + "FOCCFG": "0x17", + "FREND0": "0x10", + "FSCAL3": "0xE9", + "FSCAL2": "0x2A", + "FSCAL1": "0x00", + "FSCAL0": "0x1F", + "TEST1": "0x35", + "TEST0": "0x09", + "PA_TABLE0": "0x84" + }, + "rx": + { + }, + "tx": + { + } +} \ No newline at end of file diff --git a/cc1110-target.json b/cc1110-target.json new file mode 100644 index 0000000..ec33dc9 --- /dev/null +++ b/cc1110-target.json @@ -0,0 +1,48 @@ +{ + "common": + { + "SYNC1": "0xD3", + "SYNC0": "0x91", + "PKTLEN": "0x28", + "PKTCTRL1": "0x20", + "PKTCTRL0": "0x00", + "ADDR": "0x00", + "CHANNR": "0x00", + "FSCTRL1": "0x0F", + "FSCTRL0": "0x00", + "FREQ2": "0x12", + "FREQ1": "0x14", + "FREQ0": "0x60", + "MDMCFG4": "0xFA", + "MDMCFG3": "0xB9", + "MDMCFG2": "0x12", + "MDMCFG1": "0x41", + "MDMCFG0": "0xF0", + "DEVIATN": "0x36", + "MCSM2": "0x07", + "MCSM1": "0x30", + "MCSM0": "0x19", + "FOCCFG": "0x17", + "BSCFG": "0x6C", + "AGCCTRL2": "0x43", + "AGCCTRL1": "0x40", + "AGCCTRL0": "0x91", + "FREND1": "0x56", + "FREND0": "0x10", + "FSCAL3": "0xE9", + "FSCAL2": "0x2A", + "FSCAL1": "0x00", + "FSCAL0": "0x1F", + "TEST1": "0x31", + "TEST0": 
"0x09", + "PA_TABLE0": "0x60" + }, + "rx": + { + + }, + "tx": + { + + } +} \ No newline at end of file diff --git a/cc1110.json b/cc1110.json new file mode 100644 index 0000000..44f6c7c --- /dev/null +++ b/cc1110.json @@ -0,0 +1,56 @@ +{ + "common": + { + "SYNC1": "0xA5", + "SYNC0": "0x5A", + "PKTLEN": "0x50", + "PKTCTRL1": "0x20", + "PKTCTRL0": "0x00", + "ADDR": "0x00", + "CHANNR": "0x00", + + "FSCTRL1": "0x0F", + "FSCTRL0": "0x00", + + "MDMCFG4": "0xBA", + "MDMCFG3": "0xB9", + "MDMCFG2": "0x12", + "MDMCFG1": "0x43", + "MDMCFG0": "0x11", + + "MCSM2": "0x07", + "MCSM1": "0x30", + "MCSM0": "0x19", + + "FOCCFG": "0x17", + "BSCFG": "0x6C", + "AGCCTRL2": "0x43", + "AGCCTRL1": "0x40", + "AGCCTRL0": "0x91", + "FREND1": "0x56", + "FREND0": "0x10", + + "FSCAL3": "0xE9", + "FSCAL2": "0x2A", + "FSCAL1": "0x00", + "FSCAL0": "0x1F", + "TEST1": "0x31", + "TEST0": "0x09", + + "PA_TABLE0": "0xC8" + }, + "rx": + { + "DEVIATN": "0x46", + "FREQ2": "0x12", + "FREQ1": "0x14", + "FREQ0": "0x77" + }, + "tx": + { + "DEVIATN": "0x50", + "FREQ2": "0x12", + "FREQ1": "0x14", + "FREQ0": "0x56" + } +} diff --git a/dbsync.py b/dbsync.py new file mode 100644 index 0000000..b16a9f4 --- /dev/null +++ b/dbsync.py @@ -0,0 +1,180 @@ +import time +from pymongo import MongoClient +from mongo_sucks import mongo_find +import glob +import simplejson as json +import sqlite3 + +class DbSyncer: + + def __init__(self, bulk_path, current_pod_db_path): + with open("settings.json", "r") as stream: + self.settings = json.load(stream) + self.current_pod_id = None + self.current_pod_rowid = None + self.bulk_path = bulk_path + self.current_pod_db_path = current_pod_db_path + + def run(self): + print("running bulk import") + self.bulk_import() + print("now watching") + print(f'Current pod: {self.current_pod_id} Last synced id: {self.current_pod_rowid}') + while True: + time.sleep(10) + try: + ret = self.import_db(self.current_pod_db_path, False) + if ret is not None: + pod_id, row_id = ret + if pod_id != 
self.current_pod_id: + print(f"Pod change detected - running bulk import") + self.bulk_import() + if pod_id != self.current_pod_id or row_id != self.current_pod_rowid: + self.current_pod_id, self.current_pod_rowid = ret + print(f'Current pod: {self.current_pod_id} Last synced id: {self.current_pod_rowid}') + + except Exception as e: + print("Error importing: %s" % e) + time.sleep(120) + + def bulk_import(self): + for db_path in glob.glob(self.bulk_path + "/*.db"): + try: + abandoned = db_path != self.current_pod_db_path + ret = self.import_db(db_path, abandoned) + if not abandoned and ret is not None: + self.current_pod_id, self.current_pod_rowid = ret + + except Exception as e: + print(f'Skipping {db_path} due error: {e}') + + def import_db(self, db_path: str, abandoned: bool, last_row_id: int = None, last_pod_id: str = None): + mongo_uri = self.settings["mongo_url"] + pod_id = None + with sqlite3.connect(db_path) as conn: + try: + cursor = None + sql = "SELECT rowid, timestamp, pod_json FROM pod_history WHERE pod_state > 0" + cursor = conn.execute(sql) + sqlite_rows = cursor.fetchall() + finally: + if cursor is not None: + cursor.close() + + if sqlite_rows is not None and len(sqlite_rows) > 0: + js = json.loads(sqlite_rows[0][2]) + if "pod_id" not in js or js["pod_id"] is None: + pod_id = "L" + str(js["id_lot"]) + "T" + str(js["id_t"]) + else: + pod_id = js["pod_id"] + + if pod_id is None: + print("No pod seems to be registered on %s" % db_path) + return None + + if last_pod_id is not None and last_pod_id != pod_id: + last_row_id = None + + with MongoClient(mongo_uri) as mongo_client: + db = mongo_client.get_database("nightscout") + coll_pod_entries = db.get_collection("omnipy") + coll_pods = db.get_collection("pods") + + id_list = None + if last_row_id is None: + id_entries = mongo_find(coll_pod_entries, {'pod_id': pod_id}, projection=['last_command_db_id']) + id_list = [e['last_command_db_id'] for e in id_entries] + + first_success_ts = None + last_success_ts = 
None + start_ts = None + deactivate_ts = None + fault_ts = None + total_delivered = 0.0 + fault_code = None + + entry_added = False + for row in sqlite_rows: + last_db_id = row[0] + if row[2] is None: + continue + + if last_row_id is not None and row[0] <= last_row_id: + continue + + js = json.loads(row[2]) + if "data" in js: + js = js["data"] + + js["pod_id"] = pod_id + js["last_command_db_id"] = row[0] + js["last_command_db_ts"] = row[1] + + if not row[0] in id_list: + coll_pod_entries.insert_one(js) + entry_added = True + + if "insulin_delivered" not in js: + pass + total_delivered = js["insulin_delivered"] + + if js["state_faulted"] and fault_ts is None: + fault_code = js["fault_event"] + current_minute = js["state_active_minutes"] + faulted_at = js["fault_event_rel_time"] + fault_ts = row[1] - (current_minute - faulted_at + 1) * 60 + + lc = js["last_command"] + if lc is not None and lc["success"]: + if first_success_ts is None: + first_success_ts = row[1] + last_success_ts = row[1] + if lc["command"] == "DEACTIVATE" and deactivate_ts is None: + deactivate_ts = row[1] + if lc["command"] == "START" and start_ts is None: + if js["var_activation_date"] is not None: + start_ts = js["var_activation_date"] + else: + start_ts = row[1] + + cursor.close() + if entry_added: + pod = coll_pods.find_one({'pod_id': pod_id}) + new_pod = False + if pod is None: + new_pod = True + pod = dict() + pod['pod_id'] = pod_id + if start_ts is None: + pod["start"] = first_success_ts + else: + pod["start"] = start_ts + + pod["abandoned"] = False + + if fault_ts is not None: + pod["end"] = fault_ts + elif deactivate_ts is not None: + pod["end"] = deactivate_ts + elif abandoned: + pod["end"] = last_success_ts + pod["abandoned"] = True + else: + pod["end"] = None + + pod["delivered"] = total_delivered + pod["fault_code"] = fault_code + pod["last_rowid"] = last_db_id + + if new_pod: + coll_pods.insert_one(pod) + else: + coll_pods.replace_one(pod, pod) + + return pod_id, last_db_id + + +if 
__name__ == '__main__': + dbs = DbSyncer('/home/pi/omnipy/data', '/home/pi/omnipy/data/pod.db') + while True: + dbs.run() diff --git a/img/Console-ui_AdvancedSettings.png b/img/Console-ui_AdvancedSettings.png new file mode 100644 index 0000000..31fb6ed Binary files /dev/null and b/img/Console-ui_AdvancedSettings.png differ diff --git a/img/Console-ui_MainMenu.png b/img/Console-ui_MainMenu.png new file mode 100644 index 0000000..4890063 Binary files /dev/null and b/img/Console-ui_MainMenu.png differ diff --git a/img/prime_OK_1b.jpg b/img/prime_OK_1b.jpg new file mode 100644 index 0000000..048855c Binary files /dev/null and b/img/prime_OK_1b.jpg differ diff --git a/img/prime_OK_2b.jpg b/img/prime_OK_2b.jpg new file mode 100644 index 0000000..0f1b5f9 Binary files /dev/null and b/img/prime_OK_2b.jpg differ diff --git a/img/prime_notOK_1b.jpg b/img/prime_notOK_1b.jpg new file mode 100644 index 0000000..c40f4eb Binary files /dev/null and b/img/prime_notOK_1b.jpg differ diff --git a/mongo_sucks.py b/mongo_sucks.py new file mode 100644 index 0000000..680ea86 --- /dev/null +++ b/mongo_sucks.py @@ -0,0 +1,19 @@ +from pymongo.collection import Collection +from pymongo.command_cursor import CommandCursor +from pymongo.cursor import Cursor, CursorType + + +def mongo_aggregate(coll: Collection, pipeline) -> []: + return mongo_result(coll.aggregate(pipeline)) + + +def mongo_find(coll: Collection, query, sort=None, projection=None) -> []: + return mongo_result(coll.find(filter=query, sort=sort, projection=projection, cursor_type=CursorType.EXHAUST)) + + +def mongo_result(cc: Cursor) -> []: + ret = [] + with cc: + for r in cc: + ret.append(r) + return ret diff --git a/mq-g.py b/mq-g.py new file mode 100644 index 0000000..2a034b2 --- /dev/null +++ b/mq-g.py @@ -0,0 +1,360 @@ +#!/home/pi/v/bin/python3 +import concurrent +import glob +import sqlite3 +import time +from podcomm.pdm import Pdm +from podcomm.pod import Pod +from podcomm.definitions import * +import simplejson as json 
+from decimal import * +from google.cloud import pubsub_v1 +import os + + +class MqOperator(object): + def __init__(self): + configureLogging() + self.logger = getLogger(with_console=True) + get_packet_logger(with_console=True) + self.logger.info("mq operator is starting") + + with open("settings.json", "r") as stream: + self.settings = json.load(stream) + + self.mqtt_client = None + + self.i_pdm = None + self.i_pod = None + self.g_pdm = None + self.g_pod = None + + self.decimal_zero = Decimal("0") + self.i_rate_requested = None + self.i_rate_duration_requested = None + self.i_bolus_requested = self.decimal_zero + self.g_rate_requested = None + self.g_bolus_requested = self.decimal_zero + + self.started = time.time() + self.insulin_bolus_pulse_interval = 4 + self.clock_updated = time.time() + self.next_pdm_run = time.time() + self.publisher = None + + def run(self): + # self.ntp_update() + self.i_pod = Pod.Load("/home/pi/omnipy/data/pod.json", "/home/pi/omnipy/data/pod.db") + self.i_pdm = Pdm(self.i_pod) + # self.i_pdm.start_radio() + + subscriber_client = pubsub_v1.SubscriberClient() + subscription_path = subscriber_client.subscription_path("omnicore17", "py-cmd") + + streaming_pull_future = subscriber_client.subscribe(subscription_path, callback=self.google_sub_callback) + + self.publisher = pubsub_v1.PublisherClient( + # Optional + batch_settings=pubsub_v1.types.BatchSettings( + max_bytes=1024, # One kilobyte + max_latency=1, # One second + ), + + # Optional + publisher_options=pubsub_v1.types.PublisherOptions( + enable_message_ordering=False, + flow_control=pubsub_v1.types.PublishFlowControl( + message_limit=2000, + limit_exceeded_behavior=pubsub_v1.types.LimitExceededBehavior.BLOCK, + ), + ), + + # Optional + client_config={ + "interfaces": { + "google.pubsub.v1.Publisher": { + "retry_params": { + "messaging": { + 'total_timeout_millis': 300000, # default: 600000 + } + } + } + } + }, + ) + + while True: + try: + streaming_pull_future.result(timeout=3) + except 
concurrent.futures._base.TimeoutError: + if self.next_pdm_run <= time.time(): + self.run_pdm() + except Exception as e: + self.logger.error("What the err?", e) + raise e + + def google_sub_callback(self, message): + self.logger.info( + "Received message {} of message ID {}\n".format(message, message.message_id) + ) + js = None + try: + str_message = bytes.decode(message.data, encoding="ASCII") + js = json.loads(str(str_message)) + except Exception as e: + self.logger.error("failed to parse message", e) + if js is not None: + try: + topic = js["topic"] + msg = js["msg"] + if self.on_message(topic, msg): + message.ack() + except Exception as e: + self.logger.error("failed to process message", e) + self.logger.info("Acknowledged message {}\n".format(message.message_id)) + + + def on_message(self, topic, message): + try: + if topic == self.settings["mqtt_command_topic"]: + cmd_split = message.split(' ') + if cmd_split[0] == "temp": + temp_rate = self.fix_decimal(cmd_split[1]) + temp_duration = None + if len(cmd_split) > 2: + temp_duration = self.fix_decimal(cmd_split[2]) + self.set_insulin_rate(temp_rate, temp_duration) + self.next_pdm_run = time.time() + elif cmd_split[0] == "bolus": + pulse_interval = None + bolus = self.fix_decimal(cmd_split[1]) + if len(cmd_split) > 2: + pulse_interval = int(cmd_split[2]) + self.set_insulin_bolus(bolus, pulse_interval) + self.next_pdm_run = time.time() + elif cmd_split[0] == "status": + self.next_pdm_run = time.time() + elif cmd_split[0] == "reboot": + self.send_msg("sir yes sir") + os.system('sudo shutdown -r now') + else: + self.send_msg("lol what?") + elif topic == self.settings["mqtt_sync_request_topic"]: + if message == "latest": + self.send_result(self.i_pod) + else: + spl = message.split(' ') + pod_id = spl[0] + req_ids = spl[1:] + self.fill_request(pod_id, req_ids) + else: + self.logger.warn("unknown topic: " + topic) + return False + return True + except Exception as e: + self.logger.error(e) + self.send_msg("that didn't 
seem right") + return False + + def set_insulin_rate(self, rate: Decimal, duration_hours: Decimal): + if duration_hours is None: + self.send_msg("Rate request: Insulin %02.2fU/h" % rate) + else: + self.send_msg("Rate request: Insulin {:02.2f}U/h Duration: {:02.2f}h".format(rate, duration_hours)) + self.i_rate_requested = rate + if duration_hours is not None: + self.i_rate_duration_requested = duration_hours + else: + self.i_rate_duration_requested = Decimal("3.0") + + self.send_msg("Rate request submitted") + + def set_insulin_bolus(self, bolus: Decimal, pulse_interval: int): + self.send_msg("Bolus request: Insulin %02.2fU" % bolus) + self.i_bolus_requested = bolus + if pulse_interval is not None: + self.send_msg("Pulse interval set: %d" % pulse_interval) + self.insulin_bolus_pulse_interval = pulse_interval + else: + self.insulin_bolus_pulse_interval = 6 + self.send_msg("Bolus request submitted") + + def run_pdm(self): + self.next_pdm_run = time.time() + 1800 + if not self.check_running(): + return + + if not self.deactivate_on_err(): + return + + if not self.update_status(): + return + + if not self.schedule_request(): + return + + if not self.bolus_request(): + return + + def check_running(self): + progress = self.i_pod.state_progress + if 0 <= progress < 8 or progress == 15: + self.next_pdm_run = time.time() + 300 + return False + return True + + def deactivate_on_err(self): + if self.i_pod.state_faulted: + self.send_msg("deactivating pod") + try: + self.i_pdm.deactivate_pod() + self.send_msg("all is well, all is good") + self.next_pdm_run = time.time() + 300 + return False + except: + self.send_msg("deactivation failed") + self.next_pdm_run = time.time() + return False + finally: + self.send_result(self.i_pod) + return True + + def update_status(self): + self.send_msg("checking pod status") + try: + self.i_pdm.update_status() + self.send_msg("pod reservoir remaining: %02.2fU" % self.i_pod.insulin_reservoir) + + if self.i_pod.insulin_reservoir > 20: + 
self.next_pdm_run = time.time() + 1800 + elif self.i_pod.insulin_reservoir > 10: + self.next_pdm_run = time.time() + 600 + else: + self.next_pdm_run = time.time() + 300 + return True + except: + self.send_msg("failed to get pod status") + self.next_pdm_run = time.time() + 60 + return False + finally: + self.send_result(self.i_pod) + + def schedule_request(self): + if self.i_rate_requested is not None: + rate = self.i_rate_requested + duration = self.i_rate_duration_requested + + self.send_msg("setting temp %02.2fU/h for %02.2f hours" % (rate, duration)) + try: + self.i_pdm.set_temp_basal(rate, duration) + except: + self.send_msg("failed to set tb") + self.next_pdm_run = time.time() + return False + finally: + self.send_result(self.i_pod) + + self.send_msg("temp set") + self.i_rate_requested = None + return True + + def bolus_request(self): + if self.i_bolus_requested is not None and self.i_bolus_requested > self.decimal_zero: + self.send_msg("Bolusing %02.2fU" % self.i_bolus_requested) + try: + self.i_pdm.bolus(self.i_bolus_requested, self.insulin_bolus_pulse_interval) + self.i_bolus_requested = None + except: + self.send_msg("failed to execute bolus") + self.next_pdm_run = time.time() + 60 + return False + finally: + self.send_result(self.i_pod) + + self.i_bolus_requested = None + self.send_msg("bolus is bolus") + return True + + def fix_decimal(self, f): + i_ticks = round(float(f) * 20.0) + d_val = Decimal(i_ticks) / Decimal("20") + return d_val + + def send_result(self, pod): + msg = pod.GetString() + if pod.pod_id is None: + return + self.logger.info("sending pod result") + self.send_msg(msg, self.settings["mqtt_json_topic"]) + self.send_msg(msg, self.settings["mqtt_status_topic"]) + + def send_msg(self, msg, topic="omnipy_response"): + self.logger.info("sending msg: " + msg + " topic: " + topic) + topic_path = self.publisher.topic_path("omnicore17", "py-rsp") + msg_str = json.dumps({"topic": topic, "msg": msg}) + self.publisher.publish(topic_path, 
data=msg_str.encode(encoding="ASCII")) + + def ntp_update(self): + if self.clock_updated is not None: + if time.time() - self.clock_updated < 3600: + return + + self.logger.info("Synchronizing clock with network time") + try: + os.system('sudo systemctl stop ntp') + os.system('sudo ntpd -gq') + os.system('sudo systemctl start ntp') + self.logger.info("update successful") + self.clock_updated = time.time() + except: + self.logger.info("update failed") + + def fill_request(self, pod_id, req_ids): + db_path = self.find_db_path(pod_id) + if db_path is None: + self.send_msg("but I can't?") + return + + with sqlite3.connect(db_path) as conn: + for req_id in req_ids: + req_id = int(req_id) + cursor = conn.execute("SELECT rowid, timestamp, pod_json FROM pod_history WHERE rowid = " + str(req_id)) + row = cursor.fetchone() + if row is not None: + js = json.loads(row[2]) + js["pod_id"] = pod_id + js["last_command_db_id"] = row[0] + js["last_command_db_ts"] = row[1] + + self.send_msg(self.settings["mqtt_json_topic"], + json.dumps(js)) + cursor.close() + + def find_db_path(self, pod_id): + self.i_pod._fix_pod_id() + if self.i_pod.pod_id == pod_id: + return "/home/pi/omnipy/data/pod.db" + + found_db_path=None + for db_path in glob.glob("/home/pi/omnipy/data/*.db"): + with sqlite3.connect(db_path) as conn: + cursor = conn.execute("SELECT pod_json FROM pod_history WHERE pod_state > 0 LIMIT 1") + row = cursor.fetchone() + if row is not None: + js = json.loads(row[0]) + if "pod_id" not in js or js["pod_id"] is None: + found_id = "L" + str(js["id_lot"]) + "T" + str(js["id_t"]) + else: + found_id = js["pod_id"] + + if found_id == pod_id: + found_db_path = db_path + break + cursor.close() + return found_db_path + + +if __name__ == '__main__': + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/omnipy/google-settings.json" + operator = MqOperator() + operator.run() diff --git a/mq.py b/mq.py new file mode 100644 index 0000000..e17a003 --- /dev/null +++ b/mq.py @@ -0,0 +1,446 @@ 
+#!/home/pi/v/bin/python3 +import datetime as dt +import glob +import re +import sqlite3 +import requests +import time +import signal +import sys +from threading import Event +from omnipy_messenger import OmniPyMessengerClient +from podcomm.pdm import Pdm +from podcomm.pod import Pod +from podcomm.definitions import * +import simplejson as json +from decimal import Decimal + + +def get_now(): + return int(time.time() * 1000) + + +def get_ticks(d: Decimal) -> int: + return int(round(d / Decimal("0.05"))) + + +def ticks_to_decimal(ticks: int) -> Decimal: + return Decimal("0.05") * ticks + + +def seconds_to_hours(minutes: int) -> Decimal: + return Decimal(minutes) / Decimal("3600") + + +def is_expired(req): + if "expiration" in req and req["expiration"] is not None: + expiration = int(req["expiration"]) + if get_now() > expiration: + return True + return False + + +def status_match(req, pod): + if "pod_updated" in req and req["pod_updated"] is not None: + return req["pod_updated"] == int(pod["state_last_updated"] * 1000) + + +def get_json(message_data): + return json.loads(bytes.decode(message_data, encoding='UTF-8')) + + +def ntp_update(): + os.system('sudo systemctl stop ntp') + try: + if os.system('sudo ntpd -gq') != 0: + raise OSError() + finally: + os.system('sudo systemctl start ntp') + + +def restart(): + os.system('shutdown -r now') + + +def shutdown(): + os.system('shutdown -h now') + + +class MqOperator(object): + def __init__(self): + configureLogging() + self.logger = getLogger(with_console=True) + get_packet_logger(with_console=True) + self.logger.info("mq operator is starting") + + with open("settings.json", "r") as stream: + self.settings = json.load(stream) + + self.i_pdm: Pdm = None + self.i_pod: Pod = None + + self.insulin_bolus_pulse_interval = 4 + self.next_pdm_run = time.time() + + self.exit_requested = Event() + self.stopped = Event() + self.messages = [] + self.omc: OmniPyMessengerClient = None + self.db_path_cache = dict() + self.clock_updated 
= False + + def run(self): + time.sleep(5) + try: + self.omc = OmniPyMessengerClient('/home/pi/omnipy/data/messenger.db') + self.i_pod = Pod.Load("/home/pi/omnipy/data/pod.json", "/home/pi/omnipy/data/pod.db") + self.i_pdm = Pdm(self.i_pod) + self.i_pdm.start_radio() + + next_ping = time.time() - 10 + while True: + ts_now = time.time() + if ts_now > next_ping: + try: + requests.get("https://hc-ping.com/0a575069-cdf8-417b-abad-bb9d32acd5ea", timeout=10) + next_ping = ts_now + 300 + except Exception as ex: + next_ping = ts_now + 60 + self.logger.error('Failed to ping hc, not online?', ex) + self.process_messages() + if self.exit_requested.is_set(): + return + finally: + self.stopped.set() + + def pull_messages(self): + messages = self.omc.get_messages() + if self.exit_requested.is_set(): + return False + + new_request_received = False + for message in messages: + try: + request = get_json(message['message']) + if request['id'] in [msg['request']['id'] for msg in self.messages]: + continue + new_request_received = True + self.logger.debug(f'new request received, type: {request["type"]}, id: {request["id"]}') + message['request'] = request + self.messages.append(message) + except Exception as ex: + self.logger.error("Error parsing message, ignoring.", ex) + self.omc.mark_as_read(message['id']) + return new_request_received + + def process_messages(self): + try: + while True: + while self.pull_messages(): + time.sleep(3) + + if len(self.messages) == 0 or self.exit_requested.is_set(): + break + + self.filter_state_outdated() + self.filter_expired() + self.sort_requests() + self.filter_redundant() + + if len(self.messages) == 0: + break + message = self.messages[0] + request = message['request'] + result = None + try: + result = self.perform_request(request) + self.logger.debug("Request executed") + except Exception as ex: + self.logger.error("Error performing request", ex) + result = dict(error=str(ex)) + finally: + self.omc.mark_as_read(message["id"]) + 
self.messages.remove(message) + self.send_response(request, result) + + except Exception as ex: + self.logger.error("Error performing requests", ex) + + def send_response(self, request: dict, result: dict): + response = dict(request_id=request['id'], result=result) + self.omc.publish_bin(json.dumps(response, ensure_ascii=False).encode('UTF-8')) + + def filter_expired(self): + not_expired = [] + for message in self.messages: + request = message['request'] + if "expiration" in request and request["expiration"] is not None: + expiration = int(request["expiration"]) + else: + expiration = int(message['publish_time']) + 180 * 1000 + t_now = get_now() + if t_now > expiration: + self.omc.mark_as_read(message['id']) + self.send_response(request, dict(executed=False, reason='expired', + expiration=expiration, + reported_publish_time=message['publish_time'], + receive_time=message['receive_time'], + process_time=t_now)) + else: + not_expired.append(message) + self.messages = not_expired + + def filter_state_outdated(self): + up_to_date = [] + for message in self.messages: + request = message['request'] + if "required_pod_state" in request and request["required_pod_state"] is not None: + if self.i_pod is None or self.i_pod.state_last_updated is None or self.i_pod.state_last_updated == 0: + self.omc.mark_as_read(message['id']) + self.send_response(request, dict(executed=False, reason='pod_not_found', + required_state=request['required_pod_state'], + receive_time=message['receive_time'], + process_time=get_now())) + continue + else: + last_state = int(self.i_pod.state_last_updated * 1000) + if last_state != request['required_pod_state']: + self.omc.mark_as_read(message['id']) + self.send_response(request, dict(executed=False, reason='state_mismatch', + required_state=request['required_pod_state'], + active_state=self.i_pod.state_last_updated, + receive_time=message['receive_time'], + process_time=get_now())) + continue + up_to_date.append(message) + self.messages = up_to_date + 
+ def filter_redundant(self): + non_redundant = [] + type_ids = {} + + self.messages.sort(key=lambda m: m['publish_time'], + reverse=True) + for message in self.messages: + request = message['request'] + req_type = request["type"] + if req_type not in ["last_status", "run", "get_record"]: + if req_type in type_ids: + self.omc.mark_as_read(message["id"]) + self.send_response(request, dict(executed=False, reason='made_redundant', + surpassing_request_id=type_ids[req_type], + active_state=self.i_pod.state_last_updated, + receive_time=message['receive_time'], + process_time=get_now())) + continue + + non_redundant.append(message) + type_ids[req_type] = request['id'] + + self.messages = non_redundant + + def sort_requests(self): + for req in [msg['request'] for msg in self.messages]: + if "priority" not in req: + req["priority"] = -1 + + self.messages.sort(key=lambda m: m['request']['priority'], reverse=True) + + def perform_request(self, req) -> dict: + self.logger.debug(f"performing request {req}") + req_type = req["type"] + + if req_type == "last_status": + return self.active_pod_state() + elif req_type == "update_status": + self.i_pdm.update_status(2) + return self.active_pod_state() + elif req_type == "bolus": + rp = req["parameters"] + bolus_amount = ticks_to_decimal(int(rp["ticks"])) + bolus_tick_interval = int(rp["interval"]) + self.i_pdm.bolus(bolus_amount, bolus_tick_interval) + return self.active_pod_state() + elif req_type == "cancel_bolus": + self.i_pdm.cancel_bolus() + return self.active_pod_state() + elif req_type == "temp_basal": + rp = req["parameters"] + basal_rate = ticks_to_decimal(int(rp["ticks"])) + basal_duration = seconds_to_hours(int(rp["duration"])) + self.i_pdm.set_temp_basal(basal_rate, basal_duration) + return self.active_pod_state() + elif req_type == "cancel_temp_basal": + self.i_pdm.cancel_temp_basal() + return self.active_pod_state() + elif req_type == "deactivate": + self.i_pdm.deactivate_pod() + return self.active_pod_state() + elif 
req_type == "update_time": + ntp_update() + return dict(executed=True) + elif req_type == "restart": + restart() + return dict(executed=True) + elif req_type == "shutdown": + shutdown() + return dict(executed=True) + elif req_type == "run": + rp = req["parameters"] + ret = os.system(rp["command"]) + if ret != 0: + return dict(executed=False, + reason='exit_code_non_zero', + exit_code=ret) + return dict(executed=True) + elif req_type == "get_record": + rp = req["parameters"] + pod_id = None + db_id = None + if "pod_id" in rp: + pod_id = rp["pod_id"] + if "db_id" in rp: + db_id = int(rp["db_id"]) + return self.get_record(pod_id, db_id) + else: + return dict(executed=False, + reason='unknown_request_type', + request_type=req_type) + + def active_pod_state(self): + if self.i_pod is None or self.i_pod.state_last_updated is None or self.i_pod.state_last_updated == 0: + return dict(executed=True, + pod_id=None, + last_record_id=None, + status_ts=None, + status=None) + else: + last_status_ts = int(self.i_pod.__dict__["state_last_updated"] * 1000) + return dict(executed=True, + pod_id=self.i_pod.pod_id, + last_record_id=self.i_pod.last_command_db_id, + status_ts=last_status_ts, + status=self.i_pod.__dict__) + + def get_record(self, pod_id: str, db_id: int): + archived_ts = None + if pod_id is None or pod_id == self.i_pod.pod_id: + db_path = "/home/pi/omnipy/data/pod.db" + else: + db_path = self.find_db_path(pod_id) + if db_path is not None: + ds = re.findall('.+pod_(.+).db', db_path)[0] + archived_ts = dt.datetime(year=int(ds[0:4]), month=int(ds[4:6]), day=int(ds[6:8]), + hour=int(ds[9:11]), minute=int(ds[11:13]), second=int(ds[13:15])).timestamp() + + response = self.active_pod_state() + response['pod_archived'] = archived_ts is not None + response['pod_archived_ts'] = archived_ts + response['executed'] = False + + if db_path is None: + response['reason'] = 'pod_not_found' + return response + + with sqlite3.connect(db_path) as conn: + cursor = None + try: + if db_id is 
None: + sql = """SELECT rowid, timestamp, pod_json FROM pod_history ORDER BY rowid""" + cursor = conn.execute(sql) + else: + sql = """SELECT rowid, timestamp, pod_json FROM pod_history WHERE rowid = ?""" + cursor = conn.execute(sql, [db_id]) + + rows = cursor.fetchall() + + if rows is None or len(rows) == 0: + response['reason'] = 'not_found' + else: + records = [] + for row in rows: + js = json.loads(row[2]) + + if js is None: + records.append( + dict(db_id=db_id, + record=None) + ) + else: + if "data" in js: + js = js["data"] + js["last_command_db_id"] = row[0] + js["last_command_db_ts"] = row[1] + records.append(dict(db_id=row[0], record=js)) + response['executed'] = True + response['records'] = records + + return response + + finally: + if cursor is not None: + cursor.close() + + def find_db_path(self, pod_id: str): + if pod_id in self.db_path_cache: + return self.db_path_cache[pod_id] + + db_path = None + for path in glob.glob("/home/pi/omnipy/data/*.db"): + if path.endswith("pod.db"): + continue + + if path in self.db_path_cache.keys(): + continue + + with sqlite3.connect(path) as conn: + cursor = None + try: + sql = "SELECT pod_json FROM pod_history WHERE pod_state > 2 AND pod_json IS NOT NULL" + cursor = conn.execute(sql) + row = cursor.fetchone() + + if row is None: + continue + + js = json.loads(row[2]) + if "pod_id" not in js or js["pod_id"] is None: + continue + self.db_path_cache[js["pod_id"]] = db_path + if pod_id == js["pod_id"]: + db_path = path + break + finally: + if cursor is not None: + cursor.close() + + return db_path + + +def _exit_with_grace(mqo: MqOperator): + mqo.exit_requested.set() + mqo.stopped.wait(15) + exit(0) + + +def err_exit(type, value, tb): + exit(1) + + +if __name__ == '__main__': + sys.excepthook = err_exit + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/omnipy/google-settings.json" + operator = MqOperator() + try: + signal.signal(signal.SIGTERM, lambda a, b: _exit_with_grace(operator)) + 
signal.signal(signal.SIGABRT, lambda a, b: _exit_with_grace(operator)) + operator.run() + except KeyboardInterrupt: + operator.exit_requested.set() + operator.stopped.wait() + exit(0) + except Exception as e: + print(f'error while running operator\n{e}') + operator.exit_requested.set() + operator.stopped.wait(15) + exit(1) diff --git a/mq2.py b/mq2.py new file mode 100644 index 0000000..2d988f7 --- /dev/null +++ b/mq2.py @@ -0,0 +1,291 @@ +#!/home/pi/v/bin/python3 +import time +import signal +from threading import Event, Lock +from google.api_core.exceptions import AlreadyExists +from google.cloud import pubsub_v1 +from google.cloud.pubsub_v1.proto.pubsub_pb2 import PubsubMessage +from podcomm.pdm import Pdm +from podcomm.pod import Pod +from podcomm.definitions import * +import simplejson as json +from decimal import * + + +def get_now(): + return int(time.time() * 1000) + + +def get_ticks(d: Decimal) -> int: + return int(round(d / Decimal("0.05"))) + + +def ticks_to_decimal(ticks: int) -> Decimal: + return Decimal("0.05") * ticks + + +def seconds_to_hours(minutes: int) -> Decimal: + return Decimal(minutes) / Decimal("3600") + + +def is_expired(req): + if "expiration" in req and req["expiration"] is not None: + expiration = int(req["expiration"]) + if get_now() > expiration: + return True + return False + + +def status_match(req, pod): + if "pod_updated" in req and req["pod_updated"] is not None: + return req["pod_updated"] == int(pod["state_last_updated"] * 1000) + + +def get_json(message_data): + return json.loads(bytes.decode(message_data, encoding='UTF-8')) + + +class MqOperator(object): + def __init__(self): + configureLogging() + self.logger = getLogger(with_console=True) + get_packet_logger(with_console=True) + self.logger.info("mq operator is starting") + + with open("settings.json", "r") as stream: + self.settings = json.load(stream) + + self.i_pdm = None + self.i_pod = None + self.g_pdm = None + self.g_pod = None + + self.decimal_zero = Decimal("0") + 
self.i_rate_requested = None + self.i_rate_duration_requested = None + self.i_bolus_requested = self.decimal_zero + self.g_rate_requested = None + self.g_bolus_requested = self.decimal_zero + + self.started = time.time() + self.insulin_bolus_pulse_interval = 4 + self.next_pdm_run = time.time() + + self.message_event = Event() + self.exit_event = Event() + self.stop_event = Event() + self.requests_lock = Lock() + self.requests = [] + + subscriber = pubsub_v1.SubscriberClient() + sub_topic_path = subscriber.topic_path('omnicore17', 'py-cmd') + subscription_path = subscriber.subscription_path('omnicore17', 'sub-pycmd-mqop') + try: + subscriber.create_subscription(subscription_path, sub_topic_path, ack_deadline_seconds=600) + except AlreadyExists: + pass + + publisher = pubsub_v1.PublisherClient( + batch_settings=pubsub_v1.types.BatchSettings( + max_bytes=4096, + max_latency=5, + ), + client_config={ + "interfaces": { + "google.pubsub.v1.Publisher": { + "retry_params": { + "messaging": { + 'total_timeout_millis': 60000, # default: 600000 + } + } + } + } + }, + publisher_options=pubsub_v1.types.PublisherOptions( + flow_control=pubsub_v1.types.PublishFlowControl( + message_limit=1000, + byte_limit=1024 * 64, + limit_exceeded_behavior=pubsub_v1.types.LimitExceededBehavior.BLOCK, + ))) + + self.subscriber = subscriber + self.publisher = publisher + self.subscription_path = subscription_path + self.subscription_future = None + self.publish_future = None + self.publish_path = self.publisher.topic_path('omnicore17', 'py-rsp') + + def run(self): + try: + self.i_pod = Pod.Load("/home/pi/omnipy/data/pod.json", "/home/pi/omnipy/data/pod.db") + self.i_pdm = Pdm(self.i_pod) + self.i_pdm.start_radio() + + self.subscription_future = self.subscriber.subscribe(self.subscription_path, + callback=self.subscription_callback, + # scheduler=ThreadScheduler( + # executor=ThreadPoolExecutor(max_workers=2)) + ) + while True: + if self.message_event.wait(5): + time.sleep(5) + + if 
self.exit_event.is_set(): + break + if self.message_event.is_set(): + self.process_requests() + + except Exception: + raise + finally: + self.subscriber.close() + self.publisher.stop() + self.stop_event.set() + + def subscription_callback(self, message: PubsubMessage): + try: + new_request = get_json(message.data) + new_request['message'] = message + + with self.requests_lock: + for request in self.requests: + if request['message'].message_id == message.message_id\ + or request['id'] == new_request['id']: + message.ack() + break + else: + self.requests.append(new_request) + self.message_event.set() + except Exception as e: + self.logger.error("Error parsing message in callback", e) + message.nack() + + def process_requests(self): + with self.requests_lock: + self.message_event.clear() + self.filter_expired() + self.filter_outdated() + self.filter_redundant() + self.sort_requests() + + if len(self.requests) > 0: + req = self.requests[0] + msg = req['message'] + try: + self.perform_request(req) + except Exception as e: + self.logger.error("Error performing request", e) + msg.nack() + self.send_response(req, "fail") + return + + msg.ack() + self.send_response(req, "success") + + def filter_expired(self): + not_expired = [] + for req in self.requests: + msg = req['message'] + if "expiration" in req and req["expiration"] is not None: + expiration = int(req["expiration"]) + else: + expiration = int(msg.publish_time.timestamp() * 1000) + 60 * 1000 + if get_now() > expiration: + msg.ack() + self.send_response(req, "expired") + else: + not_expired.append(req) + self.requests = not_expired + + def filter_outdated(self): + up_to_date = [] + pod = self.i_pod.__dict__ + for req in self.requests: + msg = req['message'] + if "state" in req and req["state"] is not None: + last_state = int(pod["state_last_updated"] * 1000) + if req["state"] != last_state: + msg.ack() + self.send_response(req, "outdated") + continue + + up_to_date.append(req) + self.requests = up_to_date + + def 
filter_redundant(self): + non_redundant = [] + types = {} + + self.requests.sort(key=lambda r: r['message'].publish_time.timestamp(), reverse=True) + for req in self.requests: + msg = req['message'] + req_type = req["type"] + if req_type in types: + msg.ack() + self.send_response(req, "redundant") + continue + + non_redundant.append(req) + types[req_type] = None + self.requests = non_redundant + + def sort_requests(self): + for req in self.requests: + if "priority" not in req: + req["priority"] = -1 + + self.requests.sort(key=lambda r: r['priority'], reverse=True) + + def send_response(self, request: dict, result: str): + self.logger.debug(f'responding to request {request["id"]}: {result}') + request_copy = request.copy() + request_copy.pop('message') + response = { + 'request': request_copy, + 'result': result, + 'state': int(self.i_pod.__dict__["state_last_updated"] * 1000), + 'pod': self.i_pod.__dict__ + } + self.publisher.publish(self.publish_path, json.dumps(response).encode('UTF-8')) + + def perform_request(self, req): + self.logger.debug(f"performing request {req}") + req_type = req["type"] + + if req_type == "last_status": + return + elif req_type == "update_status": + self.i_pdm.update_status() + elif req_type == "bolus": + rp = req["parameters"] + bolus_amount = ticks_to_decimal(int(rp["ticks"])) + bolus_tick_interval = int(rp["interval"]) + self.i_pdm.bolus(bolus_amount, bolus_tick_interval) + elif req_type == "temp_basal": + rp = req["parameters"] + basal_rate = ticks_to_decimal(int(rp["ticks"])) + basal_duration = seconds_to_hours(int(rp["duration"])) + self.i_pdm.set_temp_basal(basal_rate, basal_duration) + else: + raise InvalidOperation + + +def _exit_with_grace(operator: MqOperator): + operator.exit_event.set() + operator.stop_event.wait() + + +if __name__ == '__main__': + exited = False + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/omnipy/google-settings.json" + while not exited: + operator = MqOperator() + try: + 
signal.signal(signal.SIGTERM, lambda a, b: _exit_with_grace(operator)) + operator.run() + exited = True + except Exception as e: + print(f'error while running operator\n{e}') + operator.exit_event.set() + operator.stop_event.wait() + time.sleep(10) diff --git a/omni.py b/omni.py old mode 100755 new mode 100644 index c282947..e10d5d8 --- a/omni.py +++ b/omni.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/home/pi/v/bin/python3 from podcomm.definitions import * import requests @@ -15,7 +15,7 @@ def get_auth_params(): - with open(KEY_FILE, "rb") as keyfile: + with open(DATA_PATH + KEY_FILE, "rb") as keyfile: key = keyfile.read(32) r = requests.get(ROOT_URL + REST_URL_TOKEN, timeout=20) @@ -41,8 +41,11 @@ def read_pdm_address(args, pa): def new_pod(args, pa): - pa["id_lot"] = args.id_lot - pa["id_t"] = args.id_t + + if args.id_lot is not None: + pa["id_lot"] = args.id_lot + if args.id_t is not None: + pa["id_t"] = args.id_t if args.radio_address is not None: if str(args.radio_address).lower().startswith("0x"): pa["radio_address"] = int(args.radio_address[2:], 16) @@ -71,6 +74,7 @@ def cancel_bolus(args, pa): def status(args, pa): + pa["type"] = args.req_type call_api(args.url, REST_URL_STATUS, pa) @@ -79,16 +83,23 @@ def deactivate(args, pa): def activate(args, pa): + pa["utc"] = args.utcoffset + call_api(args.url, REST_URL_PAIR_POD, pa) + pa = get_auth_params() call_api(args.url, REST_URL_ACTIVATE_POD, pa) +def archive(args, pa): + call_api(args.url, REST_URL_ARCHIVE_POD, pa) + + +def silence(args, pa): + call_api(args.url, REST_URL_SILENCE_ALARMS, pa) + def start(args, pa): for i in range(0,48): pa["h" + str(i)] = args.basalrate - pa["hours"] = 0 - pa["minutes"] = 0 - pa["seconds"] = 0 call_api(args.url, REST_URL_START_POD, pa) def shutdown(args, pa): @@ -109,12 +120,16 @@ def main(): subparser.set_defaults(func=read_pdm_address) subparser = subparsers.add_parser("newpod", help="newpod -h") - subparser.add_argument("id_lot", type=int, help="Lot number of the pod") - 
subparser.add_argument("id_t", type=int, help="Serial number of the pod") + subparser.add_argument("id_lot", type=int, help="Lot number of the pod", default=None, nargs="?") + subparser.add_argument("id_t", type=int, help="Serial number of the pod", default=None, nargs="?") subparser.add_argument("radio_address", help="Radio radio_address of the pod", default=None, nargs="?") subparser.set_defaults(func=new_pod) + subparser = subparsers.add_parser("silence", help="silence -h") + subparser.set_defaults(func=silence) + subparser = subparsers.add_parser("status", help="status -h") + subparser.add_argument("req_type", type=int, help="Status request type", default=0, nargs="?") subparser.set_defaults(func=status) subparser = subparsers.add_parser("tempbasal", help="tempbasal -h") @@ -133,6 +148,7 @@ def main(): subparser.set_defaults(func=cancel_bolus) subparser = subparsers.add_parser("activate", help="activate -h") + subparser.add_argument("utcoffset", type=int, help="utc offset for pod time in minutes") subparser.set_defaults(func=activate) subparser = subparsers.add_parser("start", help="start -h") @@ -148,6 +164,9 @@ def main(): subparser = subparsers.add_parser("restart", help="restart -h") subparser.set_defaults(func=restart) + subparser = subparsers.add_parser("archive", help="archive -h") + subparser.set_defaults(func=archive) + args = parser.parse_args() pa = get_auth_params() args.func(args, pa) diff --git a/omnipy_beacon.py b/omnipy_beacon.py index d117a4d..9e2fd24 100644 --- a/omnipy_beacon.py +++ b/omnipy_beacon.py @@ -1,3 +1,4 @@ +#!/home/pi/v/bin/python3 from socketserver import UDPServer, BaseRequestHandler from podcomm.definitions import getLogger, configureLogging diff --git a/omnipy_messenger.py b/omnipy_messenger.py new file mode 100644 index 0000000..dd0171d --- /dev/null +++ b/omnipy_messenger.py @@ -0,0 +1,89 @@ +import signal +import time +from concurrent.futures.process import ProcessPoolExecutor +from concurrent.futures.thread import 
ThreadPoolExecutor +from logging import Logger, DEBUG +from threading import Timer, Event, Condition + +import simplejson as json +import os + +from google.api_core.exceptions import AlreadyExists +from google.cloud import pubsub_v1 +from google.cloud.pubsub_v1.futures import Future +from google.cloud.pubsub_v1.subscriber.message import Message +from google.cloud.pubsub_v1.subscriber.scheduler import ThreadScheduler +import sqlite3 + + +class OmniPyMessengerClient: + def __init__(self, path_db: str): + self.path_db = path_db + self.logger = Logger('omnipy_messenger_client', level=DEBUG) + self.notify_timer = None + self.incoming_message_event = Event() + + def notify_after(self, seconds: int = 10): + if self.notify_timer is not None: + self.notify_timer.cancel() + self.notify_timer = Timer(seconds, self.notify) + + def notify(self): + self.notify_timer = None + self.incoming_message_event.set() + + def start(self): + pass + + def stop(self): + pass + + def get_messages(self) -> []: + unprocessed = [] + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" SELECT rowid, receive_time, publish_time, message_id, message_data FROM incoming WHERE process_time IS NULL ORDER BY publish_time """ + c = sqlite_conn.cursor() + c.execute(sql) + try: + rows = c.fetchall() + if rows is None: + return [] + + for row in rows: + m = {'id': row[0], + 'receive_time': row[1], + 'publish_time': row[2], + 'message': row[4] + } + unprocessed.append(m) + finally: + c.close() + return unprocessed + + def mark_as_read(self, msg_id: int): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE incoming SET process_time = ? WHERE rowid = ? 
""" + sqlite_conn.execute(sql, (int(time.time() * 1000), msg_id)) + + def publish_str(self, msg_str: str): + try: + msg_data = msg_str.encode('UTF-8') + self.publish_bin(msg_data) + except Exception as e: + self.logger.error("Failed to publish message", e) + raise e + + def publish_bin(self, msg_data: bytes): + try: + self.record_outgoing_msg(msg_data) + except Exception as e: + self.logger.error("Failed to publish message", e) + raise e + + def record_outgoing_msg(self, message_data: bytes): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" INSERT INTO outgoing (send_time, message_data) + VALUES(?,?) """ + params = (int(time.time() * 1000), message_data) + sqlite_conn.execute(sql, params) + return sqlite_conn.cursor().lastrowid diff --git a/omnipy_messenger_service.py b/omnipy_messenger_service.py new file mode 100644 index 0000000..4253dee --- /dev/null +++ b/omnipy_messenger_service.py @@ -0,0 +1,353 @@ +import requests +import sqlite3 +import os +import concurrent +import sys +import threading +import signal +import time +from concurrent.futures.thread import ThreadPoolExecutor +from logging import Logger, DEBUG +from threading import Event +from google.api_core.exceptions import AlreadyExists +from google.cloud import pubsub_v1 +from google.cloud.pubsub_v1.futures import Future +from google.cloud.pubsub_v1.subscriber.message import Message +from google.cloud.pubsub_v1.subscriber.scheduler import ThreadScheduler +from concurrent.futures._base import TimeoutError + + +class OmniPyMessengerService: + def __init__(self, project_id: str, sub_topic: str, pub_topic: str, client_id: str, + path_db: str): + + try: + self.stop_requested = Event() + self.errored = False + self.stopped = Event() + self.logger = Logger('omnipy_messenger_service', level=DEBUG) + subscriber = pubsub_v1.SubscriberClient() + pub_topic_path = f'projects/{project_id}/topics/{pub_topic}' + sub_topic_path = f'projects/{project_id}/topics/{sub_topic}' + subscription_path = 
f'projects/{project_id}/subscriptions/sub-{sub_topic}-{client_id}' + + try: + subscriber.create_subscription(name=subscription_path, topic=sub_topic_path) + except AlreadyExists: + pass + + self.subscriber = subscriber + self.subscription_path = subscription_path + self.subscription_future = None + + self.publisher = pubsub_v1.PublisherClient() + + self.pub_topic_path = pub_topic_path + + self.path_db = path_db + self.publisher_thread = None + self.main_thread = None + self.self_destruct_timer = None + self.online_check_timer = None + self.watchdog_activation = None + self.init_db() + self.watchdog_good_boy() + + except Exception as ex: + self.logger.error('Initialization failed', ex) + raise ex + + def run(self): + try: + self.main_thread = threading.Thread(target=self.main) + self.main_thread.start() + while True: + if self.main_thread.join(10): + return + now = time.time() + if not self.is_watchdog_a_good_boy(): + self.errored = True + self.logger.info('watchdog activated') + self.stop_requested.set() + self.main_thread.join(10) + break + except: + self.errored = True + + self.stopped.set() + + def main(self): + self.logger.debug('starting') + + try: + self.subscription_future = self.subscriber.subscribe(self.subscription_path, + callback=self.subscription_callback) + except Exception as ex: + self.errored = True + self.logger.error("Failed to init subscriber", ex) + return + + self.publisher_thread = threading.Thread(target=self.publisher_main) + self.publisher_thread.start() + + restart_subscription = time.time() + 120 + while True: + try: + self.subscription_future.result(timeout=5) + restart_subscription = time.time() + 120 + except TimeoutError: + if self.stop_requested.wait(1): + break + if self.errored: + break + if restart_subscription < time.time(): + self.subscription_future.cancel() + try: + self.subscription_future = self.subscriber.subscribe(self.subscription_path, + callback=self.subscription_callback) + restart_subscription = time.time() + 120 + 
except Exception as ex: + self.errored = True + self.logger.error("Failed to init subscriber", ex) + return + + except Exception as ex: + self.errored = True + self.logger.error("subscription pull failure", ex) + break + + self.logger.debug('stopping') + if self.subscriber is not None: + try: + self.subscriber.close() + except Exception as ex: + self.errored = True + self.logger.error("failed to close subscription", ex) + + try: + if self.publisher_thread is not None: + self.publisher_thread.join(30) + except Exception as ex: + self.errored = True + self.logger.error("Error stopping publisher thread", ex) + + def publisher_main(self): + while True: + if self.stop_requested.wait(10): + break + try: + self.publish_unpublished() + except Exception as ex: + self.errored = True + self.logger.error("Publishing failed", ex) + break + + try: + self.publisher.stop() + except Exception as ex: + self.errored = True + self.logger.error("Error stopping the publisher", ex) + + def subscription_callback(self, msg: Message): + try: + self.record_incoming_msg(msg) + except Exception as ex: + self.errored = True + self.stop_requested.set() + self.logger.error("Failed to record incoming message", ex) + return + + try: + msg.ack() + except Exception as ex: + self.errored = True + self.stop_requested.set() + self.logger.error("Failed to ack incoming message", ex) + return + + except Exception as e: + self.logger.error("Failed to process incoming message", e) + try: + msg.modify_ack_deadline(0) + msg.nack() + except Exception as e: + self.errored = True + self.stop_requested.set() + self.logger.error("Failed to nack message", e) + + self.watchdog_good_boy() + + def on_publish_done(self, future: Future, rowid: int): + try: + future.result() + except Exception as e: + self.errored = True + self.stop_requested.set() + self.logger.error("Publisher returned error", e) + return + + try: + self.update_outgoing_msg_as_published(rowid) + except Exception as e: + self.errored = True + 
self.stop_requested.set() + self.logger.error("Failed to update database", e) + return + + self.watchdog_good_boy() + + def init_db(self): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = """ CREATE TABLE IF NOT EXISTS incoming ( + receive_time INTEGER, + publish_time INTEGER, + process_time INTEGER, + message_id TEXT, + message_data BLOB + ) """ + sqlite_conn.execute(sql) + + sql = """ CREATE TABLE IF NOT EXISTS outgoing ( + send_time INTEGER, + publish_time INTEGER, + message_data BLOB + ) """ + sqlite_conn.execute(sql) + + sql = "PRAGMA journal_mode=WAL;" + sqlite_conn.execute(sql) + + sql = f""" DELETE FROM outgoing WHERE publish_time IS NOT NULL AND publish_time < ? """ + sqlite_conn.execute(sql, [int((time.time() - 1*3600) * 1000)]) + sql = f""" DELETE FROM incoming WHERE process_time IS NOT NULL AND process_time < ? """ + sqlite_conn.execute(sql, [int((time.time() - 6*3600) * 1000)]) + + def record_incoming_msg(self, msg: Message): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = """SELECT rowid FROM incoming WHERE message_id=?""" + params = str(msg.message_id) + c = sqlite_conn.cursor() + c.execute(sql, [params]) + row = c.fetchone() + c.close() + if row is not None: + return row[0] + + sql = f""" INSERT INTO incoming (receive_time, publish_time, message_id, message_data) + VALUES(?,?,?,?) """ + params = (int(time.time() * 1000), int(msg.publish_time.timestamp() * 1000), str(msg.message_id), msg.data) + sqlite_conn.execute(sql, params) + return sqlite_conn.cursor().lastrowid + + def update_outgoing_msg_as_published(self, rowid: int): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE outgoing SET publish_time=? 
WHERE rowid=?""" + params = (int(time.time() * 1000), rowid) + sqlite_conn.execute(sql, params) + + def publish_unpublished(self): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" SELECT rowid, message_data FROM outgoing WHERE publish_time IS NULL """ + c = sqlite_conn.cursor() + c.execute(sql) + rows = c.fetchall() + c.close() + + if rows is None: + return + + for row in rows: + future = self.publisher.publish(self.pub_topic_path, row[1]) + future.add_done_callback(lambda f: self.on_publish_done(f, row[0])) + + def watchdog_good_boy(self): + self.watchdog_activation = time.time() + 300 + if self.self_destruct_timer is not None: + self.self_destruct_timer.cancel() + + self.self_destruct_timer = threading.Timer(interval=600, function=self_destruct) + self.self_destruct_timer.setDaemon(True) + self.self_destruct_timer.start() + + def is_watchdog_a_good_boy(self) -> bool: + now = time.time() + return now < self.watchdog_activation and self.watchdog_activation - now <= 600 + + +def self_destruct(): + os.system('sudo /sbin/shutdown -r now') + + +def setup_thread_excepthook(): + init_original = threading.Thread.__init__ + + def init(self, *args, **kwargs): + + init_original(self, *args, **kwargs) + run_original = self.run + + def run_with_except_hook(*args2, **kwargs2): + try: + run_original(*args2, **kwargs2) + except: + sys.excepthook(*sys.exc_info()) + + self.run = run_with_except_hook + + threading.Thread.__init__ = init + + +def _exit_with_grace(oms: OmniPyMessengerService): + oms.stop_requested.set() + oms.stopped.wait(15) + exit(0) + + +def err_exit(type, value, tb): + exit(1) + + +def ping_or_die(retries: int = 3): + while True: + try: + requests.get("https://hc-ping.com/0a575069-cdf8-417b-abad-bb9d32acd5ea", timeout=10) + break + except: + print("failed to ping health-check.io") + retries -= 1 + if retries == 0: + self_destruct() + else: + time.sleep(30) + + +if __name__ == '__main__': + setup_thread_excepthook() + sys.excepthook = err_exit + 
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/omnipy/google-settings.json" + oms = None + try: + ping_or_die() + oms = OmniPyMessengerService('omnicore17', 'py-cmd', 'py-rsp', 'omnipy', '/home/pi/omnipy/data/messenger.db') + signal.signal(signal.SIGTERM, lambda a, b: _exit_with_grace(oms)) + signal.signal(signal.SIGABRT, lambda a, b: _exit_with_grace(oms)) + oms.run() + except KeyboardInterrupt: + if oms is None: + exit(1) + oms.stop_requested.set() + if not oms.stopped.wait(10): + oms.errored = True + except Exception as e: + print(f'error while running messenger service\n{e}') + if oms is not None: + oms.stop_requested.set() + oms.stopped.wait(10) + exit(1) + + if oms is None: + exit(1) + elif oms.errored: + exit(1) + else: + exit(0) diff --git a/omnipy_podsession.py b/omnipy_podsession.py new file mode 100644 index 0000000..1971e76 --- /dev/null +++ b/omnipy_podsession.py @@ -0,0 +1,426 @@ +import simplejson as json +import sqlite3 + + +def get_half_hour_ticks(ticks_per_half_hour: int, offset: float) -> []: + seconds = [] + if ticks_per_half_hour == 0: + return seconds + interval = 1800 / ticks_per_half_hour + second = offset - 2 + while ticks_per_half_hour > 0: + second += interval + second %= 3600 + seconds.append(second) + ticks_per_half_hour -= 1 + return seconds + + +def get_ticking_seconds(ticks_per_hour: int, ts_start: float = None): + hh_ticks = int(ticks_per_hour / 2) + offset = 0 + if ts_start is not None: + offset = ts_start % 3600 + + seconds_a = get_half_hour_ticks(hh_ticks, offset) + if ticks_per_hour % 2 == 0: + seconds_b = get_half_hour_ticks(hh_ticks, offset + 1800) + else: + seconds_b = get_half_hour_ticks(hh_ticks + 1, offset + 1800) + for second in seconds_b: + seconds_a.append(second) + seconds_a.sort() + return seconds_a + + +def append_rate_ticks(ts_start: float, ts_end: float, tick_seconds: list, append_to: list): + lts = len(tick_seconds) + if lts == 0: + return + + start_second = ts_start % 3600 + idx = 0 + for i in range(0, 
lts): + if tick_seconds[i] > start_second: + break + idx += 1 + + ts = ts_start - start_second + + while True: + if idx == lts: + idx = 0 + ts += 3600 + + next_ts = ts + tick_seconds[idx] + if next_ts >= ts_end: + break + append_to.append(next_ts) + idx += 1 + + +def append_bolus_ticks(bolus_start: float, bolus_ticks: int, pulse_interval: int, + append_to: list): + dx = bolus_start + idx = 0 + for dt_tick in append_to: + if dt_tick > dx: + break + idx += 1 + + while bolus_ticks > 0: + idx, dx = find_bolus_slot(idx, dx, pulse_interval, append_to) + append_to.insert(idx, dx) + idx += 1 + dx += pulse_interval + bolus_ticks -= 1 + + +def find_bolus_slot(idx: int, bolus_tick_ts: float, + pulse_interval: int, + tick_list: list) -> (int, float): + if idx > 0 and len(tick_list) > 0: + prev_tick = tick_list[idx - 1] + if bolus_tick_ts - prev_tick < pulse_interval: + bolus_tick_ts = prev_tick + pulse_interval + + if idx < len(tick_list): + next_tick = tick_list[idx] + while next_tick - bolus_tick_ts < pulse_interval: + idx += 1 + bolus_tick_ts = next_tick + pulse_interval + if idx == len(tick_list): + break + next_tick = tick_list[idx] + + return idx, bolus_tick_ts + + +class PodSession: + def __init__(self, + min_reservoir: float = 170, + start_delivery_offset: float = 2.85, + precision: float = 0.05): + self.deliveries = dict() + self.fixed_stamps = [] + self.fixed_deliveries = [] + self.ts_baseline = None + self.ts_baseline_min = None + self.ts_baseline_max = None + self.precision = precision + self.reservoir = self._pi(min_reservoir) + self.start_delivery_offset = self._pi(start_delivery_offset) + + self.activation_ts = None + self.start_ts = None + self.basal_rate = None + self.end_ts = None + + self.temp_basal_start_min = None + self.temp_basal_end_min = None + self.temp_basal_total = None + + self.bolus_start_min = None + self.bolus_end_min = None + self.bolus_total = None + + self.ended = False + + self.temp_basals = [] + self.boluses = [] + self.last_entry = None + 
self.pod_id = None + + self.activity_log = [] + + def get_boluses(self) -> []: + b = [] + for bolus_start, bolus_amount, p_i in self.boluses: + b.append((bolus_start, bolus_amount * self.precision, p_i)) + + return b + + def get_rates(self) -> []: + rates = [] + basal_start = self.start_ts + for rate_start, rate_end, rate in self.temp_basals: + basal_end = rate_start + if basal_end > basal_start: + rates.append((basal_start, self.basal_rate * self.precision)) + + if self.ended and rate_end > self.end_ts: + rates.append((self.end_ts, 0)) + + if rate_end > rate_start: + rates.append((rate_start, rate * self.precision)) + basal_start = rate_end + + if self.ended: + basal_end = self.end_ts + else: + basal_end = self.activation_ts + 80 * 60 * 60 + + rates.append((basal_start, self.basal_rate * self.precision)) + rates.append((basal_end, 0)) + + return rates + + def get_ticks(self) -> []: + ts_ticks = [] + + basal_ticks = get_ticking_seconds(self.basal_rate) + + basal_start = self.start_ts + for rate_start, rate_end, rate in self.temp_basals: + basal_end = rate_start + if basal_end > basal_start: + append_rate_ticks(basal_start, basal_end, basal_ticks, ts_ticks) + + if self.ended and rate_end > self.end_ts: + rate_end = self.end_ts + + if rate_end > rate_start: + tb_tick_list = get_ticking_seconds(rate) + append_rate_ticks(rate_start, rate_end, tb_tick_list, ts_ticks) + basal_start = rate_end + + if self.ended: + basal_end = self.end_ts + else: + basal_end = self.activation_ts + 80 * 60 * 60 + + append_rate_ticks(basal_start, basal_end, basal_ticks, ts_ticks) + + for bolus_start, bolus_amount, p_i in self.boluses: + append_bolus_ticks(bolus_start, bolus_amount, p_i, ts_ticks) + + return ts_ticks + + def id(self, pod_id: str): + self.pod_id = pod_id + + def start(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float, + basal_rate: float, activation_date: float): + self.basal_rate = self._pi(basal_rate) + 
self.start_delivery_offset = self._pi(total_delivered) + self.start_ts = ts + self.activation_ts = activation_date + self._add_entry(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + + def deactivate(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float): + self.bolus_end(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + self.temp_basal_end(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + self.end_ts = ts + self.ended = True + + def remove(self): + ts, minute, delivered, undelivered, reservoir = self.last_entry + self.deactivate(ts, minute, delivered, undelivered, reservoir) + + def fail(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float, + failed_minute: int): + self.bolus_end(ts, failed_minute, total_delivered, total_undelivered, reservoir_remaining) + self.temp_basal_end(ts, failed_minute, total_delivered, total_undelivered, reservoir_remaining) + self.end_ts = self.activation_ts + (failed_minute * 60) + 59 + self.ended = True + + def temp_basal_start(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float, + temp_basal_rate: float, temp_basal_minutes: int): + if len(self.temp_basals) > 0: + last_rate_start, last_rate_end, last_rate = self.temp_basals[-1] + if last_rate_end > ts: + self.temp_basals[-1] = (last_rate_start, ts, last_rate) + + self._add_entry(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + self.temp_basal_start_min = minute + self.temp_basal_end_min = minute + temp_basal_minutes + self.temp_basal_total = self._pi(temp_basal_rate * temp_basal_minutes / 60) + + self.temp_basals.append((ts, ts + temp_basal_minutes * 60, self._pi(temp_basal_rate))) + + def temp_basal_end(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float): + if 
len(self.temp_basals) > 0: + last_rate_start, last_rate_end, last_rate = self.temp_basals[-1] + if last_rate_end > ts: + self.temp_basals[-1] = (last_rate_start, ts, last_rate) + self._add_entry(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + self.temp_basal_start_min = None + self.temp_basal_end_min = None + self.temp_basal_total = None + + def bolus_start(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float, + pulse_interval: int): + self._add_entry(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + self.bolus_start_min = minute + self.bolus_end_min = minute + int(self._pi(total_undelivered) / 30) + 1 + self.bolus_total = self._pi(total_undelivered) + self.boluses.append((ts, self.bolus_total, pulse_interval)) + + def bolus_end(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float): + if self.bolus_start_min is not None: + last_bolus_start, last_bolus, p_i = self.boluses[-1] + self.boluses[-1] = (last_bolus_start, last_bolus - self._pi(total_undelivered), p_i) + self._add_entry(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + self.bolus_start_min = None + self.bolus_end_min = None + self.bolus_total = None + + def entry(self, + ts: float, minute: int, + total_delivered: float, total_undelivered: float, reservoir_remaining: float): + self._add_entry(ts, minute, total_delivered, total_undelivered, reservoir_remaining) + + def _update_reservoir(self, reservoir: float, delivered: float): + ri = self._pi(reservoir) + rv = ri + self._pi(delivered) + if ri >= 1023 and rv < self.reservoir: + return + self.reservoir = rv + + def _add_entry(self, ts: float, minute: int, delivered: float, undelivered: float, reservoir: float): + self.last_entry = ts, minute, delivered, undelivered, reservoir + self._fill_missing_entries(minute, delivered, undelivered) + self.deliveries[minute] = 
self._pi(delivered) + self._update_reservoir(reservoir, delivered) + baseline = ts - minute * 60 + if self.ts_baseline_min is None: + self.ts_baseline_min = baseline + if self.ts_baseline_max is None: + self.ts_baseline_max = baseline + + self.ts_baseline_min = min(baseline, self.ts_baseline_min) + self.ts_baseline_max = max(baseline, self.ts_baseline_max) + + # print("%.0f\t%.0f" % (self.ts_baseline_min, self.ts_baseline_max)) + self.fixed_stamps.append(ts) + self.fixed_deliveries.append(delivered) + + def _fill_missing_entries(self, minute: int, delivered: float, undelivered: float): + pass + # d = self._pi(delivered) + # u = self._pi(undelivered) + # + # if self.bolus_start_min and self.temp_basal_start_min: + # if self.bolus_end_min <= self.temp_basal_end_min: + # pass + # else: + # pass + # + # if self.bolus_start_min: + # if u == 0: + # self.bolus_start_min = None + # self.bolus_end_min = None + # self.bolus_total = None + # else: + # self.bolus_start_min = minute + # self.bolus_end_min = minute + int(undelivered / 30) + 1 + # self.bolus_total = undelivered + # elif self.temp_basal_start_min: + # if minute > self.temp_basal_end_min: + # self.temp_basal_start_min = None + # self.temp_basal_end_min = None + # self.temp_basal_total = None + # else: + # pass + + def _pi(self, fpv: float) -> int: + return int(round(fpv / self.precision, 0)) + + def log_event(self, txt: str, ts: float, minute: int, delivered: float, not_delivered: float, reservoir_remaining: float): + ticks_delivered = self._pi(delivered) + ticks_not_delivered = self._pi(not_delivered) + ticks_reservoir = self._pi(reservoir_remaining) + self.activity_log.append((txt, ts, ticks_delivered, ticks_not_delivered, ticks_reservoir)) + + +def get_pod_session(db_path: str, auto_remove: bool=False) -> PodSession: + pod_id = None + ps = PodSession() + with sqlite3.connect(db_path) as conn: + try: + cursor = None + sql = "SELECT rowid, timestamp, pod_json FROM pod_history WHERE pod_state > 0" + cursor = 
conn.execute(sql) + sqlite_rows = cursor.fetchall() + finally: + if cursor is not None: + cursor.close() + + if sqlite_rows is not None and len(sqlite_rows) > 0: + js = json.loads(sqlite_rows[0][2]) + if "pod_id" not in js or js["pod_id"] is None: + pod_id = "L" + str(js["id_lot"]) + "T" + str(js["id_t"]) + else: + pod_id = js["pod_id"] + + ps.id(pod_id) + + for pe in [json.loads(row[2]) for row in sqlite_rows]: + if ps.ended: + break + + delivered = float(pe["insulin_delivered"]) + not_delivered = float(pe["insulin_canceled"]) + reservoir_remaining = float(pe["insulin_reservoir"]) + ts = float(pe["state_last_updated"]) + minute = int(pe["state_active_minutes"]) + + parameters = pe["last_command"] + command = parameters["command"] + success = parameters["success"] + + if pe["fault_event"]: + pod_minute_failure = int(pe["fault_event_rel_time"]) + ps.log_event(f"FAULTED at minute {pod_minute_failure}", ts, minute, delivered, not_delivered, + reservoir_remaining) + ps.fail(ts, minute, delivered, not_delivered, reservoir_remaining, pod_minute_failure) + elif command == "START" and success: + basal_rate = parameters["hourly_rates"][0] + activation_date = pe["var_activation_date"] + ps.log_event(f"START {basal_rate}U/h", ts, minute, delivered, not_delivered, reservoir_remaining) + ps.start(ts, minute, delivered, not_delivered, reservoir_remaining, basal_rate, activation_date) + elif command == "TEMPBASAL" and success: + tb_duration_hours = float(parameters["duration_hours"]) + tb_minutes = int(round(tb_duration_hours * 60, 0)) + tb_rate = float(parameters["hourly_rate"]) + ps.log_event(f"TEMPBASAL {tb_rate}U/h {tb_duration_hours}h", ts, minute, delivered, not_delivered, + reservoir_remaining) + ps.temp_basal_start(ts, minute, delivered, not_delivered, reservoir_remaining, tb_rate, tb_minutes) + elif command == "TEMPBASAL_CANCEL" and success: + ps.log_event(f"TEMPBASAL CANCEL", ts, minute, delivered, not_delivered, reservoir_remaining) + ps.temp_basal_end(ts, minute, 
delivered, not_delivered, reservoir_remaining) + elif command == "BOLUS" and success: + if "interval" in parameters: + p_i = parameters["interval"] + else: + p_i = 2 + ps.log_event(f"BOLUS {not_delivered} interval {p_i}", ts, minute, delivered, not_delivered, reservoir_remaining) + ps.bolus_start(ts, minute, delivered, not_delivered, reservoir_remaining, p_i) + elif command == "BOLUS_CANCEL" and success: + ps.log_event(f"BOLUS CANCEL", ts, minute, delivered, not_delivered, reservoir_remaining) + ps.bolus_end(ts, minute, delivered, not_delivered, reservoir_remaining) + elif command == "DEACTIVATE" and success: + ps.log_event(f"DEACTIVATE", ts, minute, delivered, not_delivered, reservoir_remaining) + ps.deactivate(ts, minute, delivered, not_delivered, reservoir_remaining) + elif success: + ps.log_event(f"STATUS", ts, minute, delivered, not_delivered, reservoir_remaining) + ps.entry(ts, minute, delivered, not_delivered, reservoir_remaining) + + if not ps.ended and auto_remove: + ps.remove() + + return ps \ No newline at end of file diff --git a/omnipy_remote.py b/omnipy_remote.py new file mode 100644 index 0000000..199cc73 --- /dev/null +++ b/omnipy_remote.py @@ -0,0 +1,284 @@ +import signal +import time +from concurrent.futures.process import ProcessPoolExecutor +from concurrent.futures.thread import ThreadPoolExecutor +from logging import Logger, DEBUG +from threading import Timer, Event, Condition + +import simplejson as json +import os + +from google.api_core.exceptions import AlreadyExists +from google.cloud import pubsub_v1 +from google.cloud.pubsub_v1.futures import Future +from google.cloud.pubsub_v1.subscriber.message import Message +from google.cloud.pubsub_v1.subscriber.scheduler import ThreadScheduler +from omnipy_request import * +import sqlite3 + + +class OmniPyRemote: + def __init__(self, project_id: str, sub_topic: str, pub_topic: str, client_id: str, + path_db: str, notify_condition: Condition = None): + + try: + self.logger = Logger('omnipy_remote', 
level=DEBUG) + subscriber = pubsub_v1.SubscriberClient() + sub_topic_path = subscriber.topic_path(project_id, sub_topic) + subscription_path = subscriber.subscription_path(project_id, f'sub-{sub_topic}-{client_id}') + try: + subscriber.create_subscription(subscription_path, sub_topic_path, ack_deadline_seconds=30) + except AlreadyExists: + pass + + self.subscriber = subscriber + self.subscription_path = subscription_path + self.subscription_future = None + + self.publisher = pubsub_v1.PublisherClient( + batch_settings=pubsub_v1.types.BatchSettings( + max_bytes=1024, # One kilobyte + max_latency=1, # One second + ), + client_config={ + "interfaces": { + "google.pubsub.v1.Publisher": { + "retry_params": { + "messaging": { + 'total_timeout_millis': 60000, # default: 600000 + } + } + } + } + }, + publisher_options=pubsub_v1.types.PublisherOptions( + flow_control=pubsub_v1.types.PublishFlowControl( + message_limit=1000, + byte_limit=1024*64, + limit_exceeded_behavior=pubsub_v1.types.LimitExceededBehavior.BLOCK, + ))) + + self.pub_topic_path = self.publisher.topic_path(project_id, pub_topic) + self.project_id = project_id + self.path_db = path_db + self.clean_up_timer = None + self.notify_timer = None + self.incoming_message_event = Event() + self.init_db() + + except Exception as e: + self.logger.error('Initialization failed', e) + raise e + + def start(self): + self.logger.debug('starting') + try: + self.publish_unpublished() + except Exception as e: + self.logger.error("Failed to publish unpublished messages during start-up", e) + + self.clean_up_after() + + try: + self.subscription_future = self.subscriber.subscribe(self.subscription_path, + callback=self.subscription_callback, + scheduler=ThreadScheduler( + executor=ThreadPoolExecutor(max_workers=2) + )) + + self.clean_up_timer = Timer(300.0, self.clean_up) + except Exception as e: + self.logger.error("Failed to subscribe to topic", e) + + def stop(self): + self.logger.debug('stopping') + try: + if 
self.subscription_future is not None: + self.subscription_future.cancel() + self.subscription_future = None + + self.subscriber.close() + self.subscriber = None + + except Exception as e: + self.logger.error("Failed to close the subscription", e) + + if self.notify_timer is not None: + self.notify_timer.cancel() + self.notify_timer = None + + try: + self.publisher.stop() + except Exception as e: + self.logger.error("Failed to stop the publisher", e) + + if self.clean_up_timer is not None: + self.clean_up_timer.cancel() + self.clean_up_timer = None + + def subscription_callback(self, msg: Message): + try: + self.record_incoming_msg(msg) + try: + msg.ack() + except Exception as e: + self.logger.warning("Failed to ack incoming message", e) + self.notify_after() + except Exception as e: + self.logger.error("Failed to process incoming message", e) + try: + msg.modify_ack_deadline(0) + msg.nack() + except Exception as e: + self.logger.warning("Failed to nack message", e) + + def get_messages(self) -> []: + unprocessed = [] + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" SELECT rowid, receive_time, publish_time, message_id, message_data FROM incoming WHERE process_time IS NULL ORDER BY publish_time """ + c = sqlite_conn.cursor() + c.execute(sql) + try: + rows = c.fetchall() + if rows is None: + return [] + + for row in rows: + m = {'id': row[0], + 'receive_time': row[1], + 'publish_time': row[2], + 'message': row[4] + } + unprocessed.append(m) + finally: + c.close() + return unprocessed + + def mark_as_read(self, msg): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE incoming SET process_time = ? WHERE rowid = ? 
""" + sqlite_conn.execute(sql, (int(time.time() * 1000), msg['id'])) + + def publish_str(self, msg_str: str): + try: + msg_data = msg_str.encode('UTF-8') + self.publish_bin(msg_data) + except Exception as e: + self.logger.error("Failed to publish message", e) + raise e + + def publish_bin(self, msg_data: bytearray, rowid=None): + try: + if rowid is None: + rowid = self.record_outgoing_msg(msg_data) + future = self.publisher.publish(self.pub_topic_path, msg_data) + future.add_done_callback(lambda future: self.on_publish_done(future, rowid)) + except Exception as e: + self.logger.error("Failed to publish message", e) + raise e + + def on_publish_done(self, future: Future, rowid: int): + try: + future.result() + self.update_outgoing_msg_as_published(rowid) + except Exception as e: + self.logger.warning("Publisher returned error", e) + + def init_db(self): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = """ CREATE TABLE IF NOT EXISTS incoming ( + receive_time INTEGER, + publish_time INTEGER, + process_time INTEGER, + message_id TEXT, + message_data BLOB + ) """ + sqlite_conn.execute(sql) + + sql = """ CREATE TABLE IF NOT EXISTS outgoing ( + send_time INTEGER, + publish_time INTEGER, + message_data BLOB + ) """ + sqlite_conn.execute(sql) + + def record_incoming_msg(self, msg: Message): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = """SELECT rowid FROM incoming WHERE message_id=?""" + params = str(msg.message_id) + c = sqlite_conn.cursor() + c.execute(sql, [params]) + try: + row = c.fetchone() + if row is not None: + return row[0] + finally: + c.close() + + sql = f""" INSERT INTO incoming (receive_time, publish_time, message_id, message_data) + VALUES(?,?,?,?) 
""" + params = (int(time.time() * 1000), int(msg.publish_time.timestamp() * 1000), str(msg.message_id), msg.data) + sqlite_conn.execute(sql, params) + return sqlite_conn.cursor().lastrowid + + def update_incoming_msg_as_processed(self, rowid: int): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE incoming SET process_time=? WHERE rowid=?""" + params = (int(time.time() * 1000), rowid) + sqlite_conn.execute(sql, params) + self.clean_up_after() + + def record_outgoing_msg(self, message_data: bytes): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" INSERT INTO outgoing (send_time, message_data) + VALUES(?,?) """ + params = (int(time.time() * 1000), message_data) + sqlite_conn.execute(sql, params) + return sqlite_conn.cursor().lastrowid + + def update_outgoing_msg_as_published(self, rowid: int): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE outgoing SET publish_time=? WHERE rowid=?""" + params = (int(time.time() * 1000), rowid) + sqlite_conn.execute(sql, params) + self.clean_up_after() + + def publish_unpublished(self): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" SELECT rowid, message_data FROM outgoing WHERE publish_time IS NULL """ + c = sqlite_conn.cursor() + c.execute(sql) + try: + rows = c.fetchall() + if rows is None: + return + + for row in rows: + try: + self.publish_bin(row[1], rowid=row[0]) + except Exception as e: + self.logger.error('error publishing the unpublished', e) + finally: + c.close() + + def clean_up_after(self, seconds: int = 90): + if self.clean_up_timer is not None: + self.clean_up_timer.cancel() + self.clean_up_timer = Timer(seconds, self.clean_up) + + def clean_up(self): + self.clean_up_timer = None + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" DELETE FROM outgoing WHERE publish_time IS NOT NULL AND publish_time < ? 
""" + sqlite_conn.execute(sql, [int((time.time() - 60) * 1000)]) + sql = f""" DELETE FROM incoming WHERE process_time IS NOT NULL AND process_time < ? """ + sqlite_conn.execute(sql, [int((time.time() - 60) * 1000)]) + + def notify_after(self, seconds: int = 10): + if self.notify_timer is not None: + self.notify_timer.cancel() + self.notify_timer = Timer(seconds, self.notify) + + def notify(self): + self.notify_timer = None + self.incoming_message_event.set() \ No newline at end of file diff --git a/omnipy_remote_client.py b/omnipy_remote_client.py new file mode 100644 index 0000000..d022520 --- /dev/null +++ b/omnipy_remote_client.py @@ -0,0 +1,58 @@ +from omnipy_remote import OmniPyRemote +import simplejson as json +import os +from omnipy_response import parse_response_json +import time + + +class OmnipyRemoteClient: + def __init__(self, db_path: str): + self.db_path = db_path + self.remote = OmniPyRemote('omnicore17', 'py-rsp', 'py-cmd', 'client-test3', db_path) + + def start(self): + pass + + def stop(self): + pass + +def client_main(): + remote = OmniPyRemote('omnicore17', 'py-rsp', 'py-cmd', 'client-test3', '/home/pi/omnipy/client-pubsub.db') + remote.start() + + while True: + if remote.incoming_message_event.wait(timeout=5): + messages = remote.get_messages() + for msg in messages: + msg_id = None + try: + msg_id = msg['id'] + received = msg['receive_time'] + published = msg['publish_time'] + message = bytes.decode(msg['message'], encoding='UTF-8') + js = json.loads(message) + response = parse_response_json(js) + except Exception as e: + print(f'error parsing message\n{e}') + if msg_id is not None: + remote.mark_as_read([msg_id]) + + try: + self.record_response(response) + self. 
+ except Exception as e: + pass + + + remote.stop() + + +if __name__ == "__main__": + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/omnipy/google-settings.json" + client = OmnipyRemoteClient('home/pi/omnipy/client-pubsub.db') + + client.start() + + time.sleep(10) + + client.stop() diff --git a/omnipy_request.py b/omnipy_request.py new file mode 100644 index 0000000..2f0ca97 --- /dev/null +++ b/omnipy_request.py @@ -0,0 +1,157 @@ +from decimal import Decimal +import simplejson as json +import time + +OC_RQ_SYS_ID = "sys_id" +OC_RQ_POD_ID = "pod_id" +OC_RQ_REQ_ID = "req_id" +OC_RQ_REQ_EXPIRY = "req_expiry" + +OC_RQ_HEADER = "request" +OC_RQ_TYPE = "type" +OC_RQ_PARAMS = "params" +OC_RQ_PARAMS_LAST_STATUS = "last_status" + +OC_RQ_TYPE_BOLUS = "bolus" +OC_RQ_PARAMS_BOLUS_AMOUNT = "amount" +OC_RQ_PARAMS_BOLUS_INTERVAL = "interval" + +OC_RQ_TYPE_TEMP_BASAL = "temp_basal" +OC_RQ_PARAMS_TEMP_BASAL_RATE = "basal_rate" +OC_RQ_PARAMS_TEMP_BASAL_DURATION = "basal_duration" + +OC_RQ_TYPE_STATUS = "status" +OC_RQ_TYPE_UPDATE_STATUS = "update_status" + + +def get_now(): + return int(time.time() * 1000) + + +class OmnipyRequest: + def __init__(self, sys_id: str = None, pod_id: str = None, req_id: str = None, req_expiry: int = None): + self.sys_id = sys_id + self.pod_id = pod_id + self.req_id = req_id + self.req_expiry = req_expiry + self.type = None + self.bolus_amount = None + self.bolus_interval = None + self.temp_rate = None + self.temp_duration = None + self.last_status = None + self.response = None + + def is_expired(self): + if self.req_expiry is None: + return False + else: + return get_now() >= self.req_expiry + + def get_priority(self) -> int: + if self.type == OC_RQ_TYPE_TEMP_BASAL: + return 3 + if self.type == OC_RQ_TYPE_BOLUS: + return 2 + if self.type == OC_RQ_TYPE_UPDATE_STATUS: + return 1 + if self.type == OC_RQ_TYPE_STATUS: + return 0 + return -1 + + + def bolus(self, bolus_amount: Decimal, tick_interval: int, last_status: int, expiry_seconds: int = 60) 
-> str: + ts_now = get_now() + self.req_id = ts_now + r = { + OC_RQ_SYS_ID: self.sys_id, + OC_RQ_POD_ID: self.pod_id, + OC_RQ_TYPE: OC_RQ_TYPE_BOLUS, + OC_RQ_REQ_ID: ts_now, + OC_RQ_REQ_EXPIRY: ts_now + 1000*60, + OC_RQ_PARAMS: { + OC_RQ_PARAMS_BOLUS_AMOUNT: self.get_ticks(bolus_amount), + OC_RQ_PARAMS_BOLUS_INTERVAL: tick_interval, + OC_RQ_PARAMS_LAST_STATUS: last_status + } + } + return json.dumps(r) + + def temp_basal(self, basal_rate: Decimal, duration_minutes: int, last_status: int, + expiry_seconds: int = 60) -> str: + ts_now = get_now() + self.req_id = ts_now + r = { + OC_RQ_SYS_ID: self.sys_id, + OC_RQ_POD_ID: self.pod_id, + OC_RQ_TYPE: OC_RQ_TYPE_TEMP_BASAL, + OC_RQ_REQ_ID: ts_now, + OC_RQ_REQ_EXPIRY: ts_now + 1000*expiry_seconds, + OC_RQ_PARAMS: { + OC_RQ_PARAMS_TEMP_BASAL_RATE: self.get_ticks(basal_rate), + OC_RQ_PARAMS_TEMP_BASAL_DURATION: duration_minutes, + OC_RQ_PARAMS_LAST_STATUS: last_status + } + } + return json.dumps(r) + + def status(self): + ts_now = get_now() + self.req_id = ts_now + + r = { + OC_RQ_SYS_ID: self.sys_id, + OC_RQ_POD_ID: self.pod_id, + OC_RQ_TYPE: OC_RQ_TYPE_STATUS, + OC_RQ_REQ_ID: ts_now, + } + return json.dumps(r) + + def update_status(self, last_status: int, expiry_seconds: int = 60): + ts_now = get_now() + self.req_id = ts_now + r = { + OC_RQ_SYS_ID: self.sys_id, + OC_RQ_POD_ID: self.pod_id, + OC_RQ_TYPE: OC_RQ_TYPE_UPDATE_STATUS, + OC_RQ_REQ_ID: ts_now, + OC_RQ_REQ_EXPIRY: ts_now + 1000 * expiry_seconds, + OC_RQ_PARAMS: { + OC_RQ_PARAMS_LAST_STATUS: last_status + } + } + return json.dumps(r) + + +def get_ticks(d: Decimal) ->int: + return int(round(d / Decimal("0.05"))) + + +def get_decimal(ticks: int) -> Decimal: + return Decimal("0.05") * ticks + + +def parse_request_json(js: dict) -> OmnipyRequest: + r = OmnipyRequest() + r.sys_id = js[OC_RQ_SYS_ID] + r.pod_id = js[OC_RQ_POD_ID] + r.type = js[OC_RQ_TYPE] + r.req_id = js[OC_RQ_REQ_ID] + if OC_RQ_REQ_EXPIRY in js: + r.req_expiry = js[OC_RQ_REQ_EXPIRY] + + if OC_RQ_PARAMS in 
js: + p = js[OC_RQ_PARAMS] + if r.type == OC_RQ_TYPE_BOLUS: + r.bolus_amount = get_decimal(p[OC_RQ_PARAMS_BOLUS_AMOUNT]) + r.bolus_interval = p[OC_RQ_PARAMS_BOLUS_INTERVAL] + r.last_status = p[OC_RQ_PARAMS_LAST_STATUS] + elif r.type == OC_RQ_TYPE_TEMP_BASAL: + r.temp_rate = get_decimal(p[OC_RQ_PARAMS_TEMP_BASAL_RATE]) + r.temp_duration = p[OC_RQ_PARAMS_TEMP_BASAL_DURATION] + r.last_status = p[OC_RQ_PARAMS_LAST_STATUS] + elif r.type == OC_RQ_TYPE_UPDATE_STATUS: + r.last_status = p[OC_RQ_PARAMS_LAST_STATUS] + elif r.type == OC_RQ_TYPE_STATUS: + pass + return r diff --git a/omnipy_response.py b/omnipy_response.py new file mode 100644 index 0000000..57d0c4e --- /dev/null +++ b/omnipy_response.py @@ -0,0 +1,54 @@ +from decimal import Decimal +import simplejson as json +import time + +OC_RP_SYS_ID = "sys_id" +OC_RP_POD_ID = "pod_id" +OC_RP_REQ_ID = "req_id" +OC_RP_RESULT = "result" +OC_RP_TIME = "time" +OC_RP_POD_JSON = "pod_json" + +OC_RP_RESULT_SUCCESS = "success" +OC_RP_RESULT_EXPIRED = "expired" +OC_RP_RESULT_REDUNDANT = "redundant" +OC_RP_RESULT_FAILED = "failed" +OC_RP_RESULT_STATUS_MISMATCH = "mismatch" + + +def get_now(): + return int(time.time() * 1000) + + +class OmnipyResponse: + def __init__(self, sys_id: str = None, pod_id: str = None, req_id: str = None, + response_result: str = None, + pod_json: dict = None): + self.sys_id = sys_id + self.pod_id = pod_id + self.req_id = req_id + self.response_time = get_now() + self.response_result = response_result + self.pod_json = pod_json + + def as_json_str(self) -> str: + r = { + OC_RP_SYS_ID: self.sys_id, + OC_RP_POD_ID: self.pod_id, + OC_RP_REQ_ID: self.req_id, + OC_RP_RESULT: self.response_result, + OC_RP_TIME: self.response_time, + OC_RP_POD_JSON: self.pod_json, + } + return json.dumps(r) + + +def parse_response_json(js: dict) -> OmnipyResponse: + r = OmnipyResponse() + r.sys_id = js[OC_RP_SYS_ID] + r.pod_id = js[OC_RP_POD_ID] + r.req_id = js[OC_RP_REQ_ID] + r.response_result = js[OC_RP_RESULT] + r.response_time 
= js[OC_RP_TIME] + r.pod_json = js[OC_RP_POD_JSON] + return r \ No newline at end of file diff --git a/omnipyremote.py b/omnipyremote.py new file mode 100644 index 0000000..260ab76 --- /dev/null +++ b/omnipyremote.py @@ -0,0 +1,274 @@ +import signal +import time +from concurrent.futures.process import ProcessPoolExecutor +from concurrent.futures.thread import ThreadPoolExecutor +from logging import Logger, DEBUG +from threading import Timer + +import simplejson as json +import os + +from google.api_core.exceptions import AlreadyExists +from google.cloud import pubsub_v1 +from google.cloud.pubsub_v1.futures import Future +from google.cloud.pubsub_v1.subscriber.message import Message +from google.cloud.pubsub_v1.subscriber.scheduler import ThreadScheduler + +from omnipyrequest import * +import sqlite3 + + +class OmniPyRemote: + def __init__(self, project_id: str, sub_topic: str, pub_topic: str, client_id: str, + path_db: str): + + try: + self.logger = Logger('omnipy_remote', level=DEBUG) + subscriber = pubsub_v1.SubscriberClient() + sub_topic_path = subscriber.topic_path(project_id, sub_topic) + subscription_path = subscriber.subscription_path(project_id, f'sub-{sub_topic}-{client_id}') + try: + subscriber.create_subscription(subscription_path, sub_topic_path) + except AlreadyExists: + pass + + self.subscriber = subscriber + self.subscription_path = subscription_path + self.subscription_future = None + + self.publisher = pubsub_v1.PublisherClient() + self.pub_topic_path = self.publisher.topic_path(project_id, pub_topic) + self.project_id = project_id + self.path_db = path_db + self.clean_up_timer = None + self.init_db() + + except Exception as e: + self.logger.error('Initialization failed', e) + raise e + + def start(self): + self.logger.debug('starting') + try: + self.publish_unpublished() + except Exception as e: + self.logger.error("Failed to publish unpublished messages during start-up", e) + + self.clean_up_after() + + try: + self.subscription_future = 
self.subscriber.subscribe(self.subscription_path, + callback=self.subscription_callback, + scheduler=ThreadScheduler( + executor=ThreadPoolExecutor(max_workers=2) + )) + + self.clean_up_timer = Timer(300.0, self.clean_up) + except Exception as e: + self.logger.error("Failed to subscribe to topic", e) + + def stop(self): + self.logger.debug('stopping') + + if self.clean_up_timer is not None: + self.clean_up_timer.cancel() + + try: + if self.subscription_future is not None: + self.subscription_future.cancel() + self.subscription_future = None + + self.subscriber.close() + self.subscriber = None + + except Exception as e: + self.logger.error("Failed to close the subscription", e) + + try: + self.publisher.stop() + except Exception as e: + self.logger.error("Failed to stop the publisher", e) + + def subscription_callback(self, msg: Message): + try: + self.record_incoming_msg(msg) + try: + msg.ack() + except Exception as e: + self.logger.warning("Failed to ack incoming message", e) + except Exception as e: + self.logger.error("Failed to process incoming message", e) + try: + msg.modify_ack_deadline(0) + msg.nack() + except Exception as e: + self.logger.warning("Failed to nack message", e) + + def get_messages(self) -> []: + unprocessed = [] + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" SELECT rowid, receive_time, publish_time, message_id, message_data FROM incoming WHERE process_time IS NULL ORDER BY publish_time """ + c = sqlite_conn.cursor() + c.execute(sql) + try: + rows = c.fetchall() + if rows is None: + return [] + + for row in rows: + m = {'id': row[0], + 'receive_time': row[1], + 'publish_time': row[2], + 'message': row[4] + } + unprocessed.append(m) + finally: + c.close() + return unprocessed + + def mark_as_read(self, message_ids: []): + if message_ids is None or len(message_ids) == 0: + return + + with sqlite3.connect(self.path_db) as sqlite_conn: + for id in message_ids: + sql = f""" UPDATE incoming SET process_time = ? WHERE rowid = ? 
""" + sqlite_conn.execute(sql, (int(time.time()*1000), id)) + + def publish_str(self, msg_str: str): + try: + msg_data = msg_str.encode('UTF-8') + self.publish_bin(msg_data) + except Exception as e: + self.logger.error("Failed to publish message", e) + raise e + + def publish_bin(self, msg_data: bytearray, rowid=None): + try: + if rowid is None: + rowid = self.record_outgoing_msg(msg_data) + future = self.publisher.publish(self.pub_topic_path, msg_data) + future.add_done_callback(lambda future: self.on_publish_done(future, rowid)) + except Exception as e: + self.logger.error("Failed to publish message", e) + raise e + + def on_publish_done(self, future: Future, rowid: int): + try: + future.result() + self.update_outgoing_msg_as_published(rowid) + except Exception as e: + self.logger.warning("Publisher returned error", e) + + def init_db(self): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = """ CREATE TABLE IF NOT EXISTS incoming ( + receive_time INTEGER, + publish_time INTEGER, + process_time INTEGER, + message_id TEXT, + message_data BLOB + ) """ + sqlite_conn.execute(sql) + + sql = """ CREATE TABLE IF NOT EXISTS outgoing ( + send_time INTEGER, + publish_time INTEGER, + message_data BLOB + ) """ + sqlite_conn.execute(sql) + + def record_incoming_msg(self, msg: Message): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = """SELECT rowid FROM incoming WHERE message_id=?""" + params = str(msg.message_id) + c = sqlite_conn.cursor() + c.execute(sql, [params]) + try: + row = c.fetchone() + if row is not None: + return row[0] + finally: + c.close() + + sql = f""" INSERT INTO incoming (receive_time, publish_time, message_id, message_data) + VALUES(?,?,?,?) 
""" + params = (int(time.time() * 1000), int(msg.publish_time.timestamp() * 1000), str(msg.message_id), msg.data) + sqlite_conn.execute(sql, params) + return sqlite_conn.cursor().lastrowid + + def update_incoming_msg_as_processed(self, rowid: int): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE incoming SET process_time=? WHERE rowid=?""" + params = (int(time.time() * 1000), rowid) + sqlite_conn.execute(sql, params) + self.clean_up_after() + + def record_outgoing_msg(self, message_data: bytes): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" INSERT INTO outgoing (send_time, message_data) + VALUES(?,?) """ + params = (int(time.time() * 1000), message_data) + sqlite_conn.execute(sql, params) + return sqlite_conn.cursor().lastrowid + + def update_outgoing_msg_as_published(self, rowid: int): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" UPDATE outgoing SET publish_time=? WHERE rowid=?""" + params = (int(time.time() * 1000), rowid) + sqlite_conn.execute(sql, params) + self.clean_up_after() + + def publish_unpublished(self): + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" SELECT rowid, message_data FROM outgoing WHERE publish_time IS NULL """ + c = sqlite_conn.cursor() + c.execute(sql) + try: + rows = c.fetchall() + if rows is None: + return + + for row in rows: + try: + self.publish_bin(row[1], rowid=row[0]) + except Exception as e: + self.logger.error('error publishing the unpublished', e) + finally: + c.close() + + def clean_up_after(self, seconds:int = 90): + if self.clean_up_timer is not None: + self.clean_up_timer.cancel() + self.clean_up_timer = Timer(seconds, self.clean_up) + + def clean_up(self): + self.clean_up_timer = None + with sqlite3.connect(self.path_db) as sqlite_conn: + sql = f""" DELETE FROM outgoing WHERE publish_time IS NOT NULL AND publish_time < ? 
""" + sqlite_conn.execute(sql, [int((time.time() - 60)*1000)]) + sql = f""" DELETE FROM incoming WHERE process_time IS NOT NULL AND process_time < ? """ + sqlite_conn.execute(sql, [int((time.time() - 60)*1000)]) + +def _exit_with_grace(a, b, opr: OmniPyRemote): + opr.stop() + #os.sync() + exit(0) + + +if __name__ == "__main__": + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "c:\\dev\\pluxy\\google-secret.json" + opr = OmniPyRemote('omnicore17', 'py-rsp', 'py-cmd', 'test1', 'pyremote.db') + signal.signal(signal.SIGTERM, lambda a, b: _exit_with_grace(a,b, opr)) + signal.signal(signal.SIGABRT, lambda a, b: _exit_with_grace(a, b, opr)) + + try: + opr.start() + while True: + opr.publish_str('hey baldi over here!') + time.sleep(10) + except KeyboardInterrupt: + opr.stop() + except Exception as e: + opr.logger.error('Service exiting unexpectedly', e) + opr.stop() diff --git a/podcomm/definitions.py b/podcomm/definitions.py index 60d3b9b..356522b 100644 --- a/podcomm/definitions.py +++ b/podcomm/definitions.py @@ -1,29 +1,37 @@ -from enum import IntEnum -import os import logging -from logging.handlers import MemoryHandler +import os +from enum import IntEnum + +DATA_PATH = "./data/" -KEY_FILE = "data/key" -LAST_ACTIVATED_FILE = "data/lastactivated" -POD_FILE = "data/pod" +KEY_FILE = "key" +LAST_ACTIVATED_FILE = "lastactivated" + +POD_FILE = "pod" POD_FILE_SUFFIX = ".json" -POD_LOG_SUFFIX = ".log" -LOG_PATH = "./data" + +POD_DB_SUFFIX = ".db" +LOGFILE_SUFFIX = ".log" + OMNIPY_LOGGER = "OMNIPY" -OMNIPY_LOGFILE = "data/omnipy.log" +OMNIPY_LOGFILE = "omnipy" + OMNIPY_PACKET_LOGGER = "OMNIPACKET" -OMNIPY_PACKET_LOGFILE = "data/packet.log" +OMNIPY_PACKET_LOGFILE = "packet" -OMNIPY_LOGFILE_PREFIX = "data/omnipy" -OMNIPY_LOGFILE_SUFFIX = ".log" -OMNIPY_LOGFILE = OMNIPY_LOGFILE_PREFIX + OMNIPY_LOGFILE_SUFFIX +OMNIPY_DATABASE = "omni.db" API_VERSION_MAJOR = 1 -API_VERSION_MINOR = 3 +API_VERSION_MINOR = 5 +API_VERSION_REVISION = 0 +API_VERSION_BUILD = 20070 REST_URL_PING = 
"/omnipy/ping" REST_URL_OMNIPY_SHUTDOWN = "/omnipy/shutdown" REST_URL_OMNIPY_RESTART = "/omnipy/restart" +REST_URL_OMNIPY_UPDATE = "/omnipy/updatesw" +REST_URL_OMNIPY_WIFI = "/omnipy/updatewlan" +REST_URL_OMNIPY_CHANGE_PASSWORD = "/omnipy/changepw" REST_URL_TOKEN = "/omnipy/token" REST_URL_CHECK_PASSWORD = "/omnipy/pwcheck" @@ -34,6 +42,9 @@ REST_URL_RL_INFO = "/rl/info" +REST_URL_SILENCE_ALARMS = "/pdm/silence" +REST_URL_ARCHIVE_POD = "/pdm/archive" +REST_URL_PAIR_POD = "/pdm/pair" REST_URL_ACTIVATE_POD = "/pdm/activate" REST_URL_START_POD = "/pdm/start" REST_URL_STATUS = "/pdm/status" @@ -49,35 +60,36 @@ logger = None packet_logger = None + def ensure_log_dir(): - if not os.path.isdir(LOG_PATH): - os.mkdir(LOG_PATH) + if not os.path.isdir(DATA_PATH): + os.mkdir(DATA_PATH) -def getLogger(): +def getLogger(with_console=False): global logger if logger is None: ensure_log_dir() logger = logging.getLogger(OMNIPY_LOGGER) logger.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - fh = logging.FileHandler(OMNIPY_LOGFILE) + fh = logging.FileHandler(DATA_PATH + OMNIPY_LOGFILE + LOGFILE_SUFFIX) fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) + logger.addHandler(fh) - mh = MemoryHandler(capacity=256*1024, target=fh) - logger.addHandler(mh) - - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - ch.setFormatter(formatter) - logger.addHandler(ch) + if with_console: + ch = logging.StreamHandler() + ch.setLevel(logging.DEBUG) + ch.setFormatter(formatter) + logger.addHandler(ch) return logger -def get_packet_logger(): + +def get_packet_logger(with_console=False): global packet_logger if packet_logger is None: @@ -86,21 +98,28 @@ def get_packet_logger(): packet_logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(message)s') - fh = logging.FileHandler(OMNIPY_PACKET_LOGFILE) + fh = 
logging.FileHandler(DATA_PATH + OMNIPY_PACKET_LOGFILE + LOGFILE_SUFFIX) fh.setLevel(logging.INFO) fh.setFormatter(formatter) + packet_logger.addHandler(fh) - mh = MemoryHandler(capacity=4*1024, target=fh) - packet_logger.addHandler(mh) + if with_console: + ch = logging.StreamHandler() + ch.setLevel(logging.DEBUG) + ch.setFormatter(formatter) + packet_logger.addHandler(ch) return packet_logger + def configureLogging(): pass + class RequestType(IntEnum): pass + class BolusState(IntEnum): NotRunning = 0 Extended = 1 @@ -129,27 +148,16 @@ class PodProgress(IntEnum): Inactive = 15 -class PodAlert(IntEnum): - AutoOff = 0x01 - Unknown = 0x02 - EndOfService = 0x04 - Expired = 0x08 - LowReservoir = 0x10 - SuspendInProgress = 0x20 - SuspendEnded = 0x40 - TimerLimit = 0x80 - - -class PodAlertBit(IntEnum): - AutoOff = 0x00 - Unknown = 0x01 - EndOfService = 0x02 - Expired = 0x03 - LowReservoir = 0x04 - SuspendInProgress = 0x05 - SuspendEnded = 0x06 - TimerLimit = 0x07 - +class AlertConfiguration: + def __init__(self): + self.alert_index = None + self.activate = False + self.trigger_auto_off = False + self.alert_after_minutes = None + self.alert_after_reservoir = None + self.alert_duration = None + self.beep_type = 0 + self.beep_repeat_type = 0 class BeepPattern(IntEnum): Once = 0 diff --git a/podcomm/exceptions.py b/podcomm/exceptions.py index 5a73ac5..95ff3ec 100644 --- a/podcomm/exceptions.py +++ b/podcomm/exceptions.py @@ -11,13 +11,27 @@ def __init__(self, message="Unknown RL error", err_code=None): class ProtocolError(OmnipyError): - def __init__(self, message="Unknown protocol error"): + def __init__(self, message="Unknown protocol error", packet=None): OmnipyError.__init__(self, message) + self.packet = packet + + +class RecoverableProtocolError(ProtocolError): + def __init__(self, message, packet): + OmnipyError.__init__(self, message) + self.packet = packet + + +class StatusUpdateRequired(OmnipyError): + def __init(self): + OmnipyError.__init__(self) + class 
OmnipyTimeoutError(OmnipyError): def __init__(self, message="Timeout error"): OmnipyError.__init__(self, message) + class PdmError(OmnipyError): def __init__(self, message="Unknown pdm error"): OmnipyError.__init__(self, message) diff --git a/podcomm/manchester.py b/podcomm/manchester.py index 507fab8..a535343 100644 --- a/podcomm/manchester.py +++ b/podcomm/manchester.py @@ -4,17 +4,19 @@ def encodeSingleByte(d): e = 0 for i in range(0, 8): - e = e << 2 + + e = e >> 2 if d & 0x01 == 0: - e |= 2 + e |= 0x8000 else: - e |= 1 + e |= 0x4000 d = d >> 1 return bytes([(e >> 8), e & 0xff]) class ManchesterCodec: def __init__(self): - self.preamble = bytes([0x66,0x65]) * 200 + bytes([0xa5, 0x5a]) + #self.preamble = bytes([0x65,0x66]) * 20 + bytes([0xa5, 0x5a]) + self.preamble = bytes() self.decode_dict = dict() self.encode_dict = dict() for i in range(0, 256): @@ -27,7 +29,7 @@ def __init__(self): self.noiseLines = [] for x in range(0, 32): noiseLine = "f" - for i in range(0, 159): + for i in range(0, 79): noiseLine += random.choice(noiseNibbles) self.noiseLines.append(bytearray.fromhex(noiseLine)) @@ -48,13 +50,5 @@ def encode(self, data): encoded += self.noiseLines[self.noiseSeq] self.noiseSeq += 1 self.noiseSeq %= 32 - - minPreamble = 4 - minNoise = 2 - available = 512 - len(data) - minPreamble - minNoise - dataIndex = len(self.preamble) - portion = int(available / 2) - preambleIncluded = minPreamble + portion - noiseIncluded = minNoise + available - portion - return encoded[dataIndex - preambleIncluded: dataIndex + noiseIncluded] + return encoded[0:80] diff --git a/podcomm/nonce.py b/podcomm/nonce.py index 0eb48ac..76a12f7 100644 --- a/podcomm/nonce.py +++ b/podcomm/nonce.py @@ -33,7 +33,7 @@ def reset(self): def sync(self, syncWord, msgSequence): w_sum = (self.lastNonce & 0xFFFF) + (crc16_table[msgSequence] & 0xFFFF) \ + (self.lot & 0xFFFF) + (self.tid & 0xFFFF) - self.seed = (w_sum & 0xFFFF) ^ syncWord + self.seed = ((w_sum & 0xFFFF) ^ syncWord) & 0xff self.lastNonce 
= None self.nonce_runs = 0 self._initialize() diff --git a/podcomm/packet_radio.py b/podcomm/packet_radio.py index 2f8b8d5..c22846c 100644 --- a/podcomm/packet_radio.py +++ b/podcomm/packet_radio.py @@ -5,9 +5,10 @@ class TxPower(IntEnum): Lowest = 0 Low = 1 - Normal = 2 - High = 3 - Highest = 4 + Lower = 2 + Normal = 3 + High = 4 + Highest = 5 class PacketRadio(abc.ABC): diff --git a/podcomm/pdm.py b/podcomm/pdm.py index e7b4796..3821352 100644 --- a/podcomm/pdm.py +++ b/podcomm/pdm.py @@ -1,7 +1,7 @@ from .protocol import * from .protocol_radio import PdmRadio from .nonce import * -from .exceptions import PdmError, OmnipyError, PdmBusyError +from .exceptions import PdmError, OmnipyError, PdmBusyError, StatusUpdateRequired from .definitions import * from .packet_radio import TxPower from decimal import * @@ -34,12 +34,21 @@ def __init__(self, pod): self.pod = pod self.nonce = None self.radio = None + self.time_adjustment = 0 self.logger = getLogger() + def stop_radio(self): + if self.radio is not None: + self.radio.stop() + self.radio = None + + def start_radio(self): + self.get_radio(new=True) + def get_nonce(self): if self.nonce is None: if self.pod.id_lot is None or self.pod.id_t is None: - raise PdmError("Cannot generate nonce without pod lot and id") + return None if self.pod.nonce_last is None or self.pod.nonce_seed is None: self.nonce = Nonce(self.pod.id_lot, self.pod.id_t) else: @@ -62,13 +71,17 @@ def get_radio(self, new=False): return self.radio - def send_request(self, request, with_nonce=False, double_take=False, expect_critical_follow_up=False): + def send_request(self, request, with_nonce=False, double_take=False, + expect_critical_follow_up=False, + tx_power=TxPower.Normal): nonce_obj = self.get_nonce() if with_nonce: nonce_val = nonce_obj.getNext() request.set_nonce(nonce_val) + self.pod.nonce_syncword = None + self.get_radio().start_rssi_averaging() response = self.get_radio().send_message_get_message(request, double_take=double_take, 
expect_critical_follow_up=expect_critical_follow_up) response_parse(response, self.pod) @@ -87,61 +100,143 @@ def send_request(self, request, with_nonce=False, double_take=False, expect_crit self.get_nonce().reset() raise PdmError("Nonce sync failed") + return self.get_radio().get_rssi_average() + def _internal_update_status(self, update_type=0): self._assert_pod_address_assigned() - self.send_request(request_status(update_type)) + return self.send_request(request_status(update_type)) def update_status(self, update_type=0): + rssi = 0 try: with PdmLock(): self.logger.info("Updating pod status, request type %d" % update_type) - self._internal_update_status(update_type) - except OmnipyError: - raise + self.pod.last_command = { "command": "STATUS", "type": update_type, "success": False } + rssi = self._internal_update_status(update_type) + self.pod.last_command["success"] = True + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + rssi = self._internal_update_status(1) + if update_type != 1: + self.update_status(update_type=update_type) except Exception as e: raise PdmError("Unexpected error") from e finally: self._savePod() + return rssi + def acknowledge_alerts(self, alert_mask): try: with PdmLock(): + self.logger.info("Acknowledging alerts with bitmask %d" % alert_mask) + self.pod.last_command = {"command": "ACK_ALERTS", "mask": alert_mask, "success": False} self._assert_pod_address_assigned() self._internal_update_status() + self._assert_immediate_bolus_not_active() self._assert_can_acknowledge_alerts() if self.pod.state_alert | alert_mask != self.pod.state_alert: raise PdmError("Bitmask invalid for current alert state") - self.logger.info("Acknowledging alerts with bitmask %d" % alert_mask) request = request_acknowledge_alerts(alert_mask) self.send_request(request, with_nonce=True) if self.pod.state_alert & alert_mask != 0: raise PdmError("Failed to acknowledge one or more alerts") - except OmnipyError: - raise + 
self.pod.last_command["success"] = True + + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.acknowledge_alerts(alert_mask) except Exception as e: raise PdmError("Unexpected error") from e finally: self._savePod() - def is_busy(self): + # def configure_reservoir_alarm(self, iu_reservoir_level=None): + # try: + # with PdmLock(0): + # if iu_reservoir_level is None: + # request = request_clear_low_reservoir_alert() + # else: + # request = request_set_low_reservoir_alert(self.pod.var_alert_low_reservoir) + # self.send_request(request, with_nonce=True) + # self.pod.var_alert_low_reservoir_set = True + # except OmnipyError: + # raise + # except Exception as e: + # raise PdmError("Unexpected error") from e + # + # def configure_pod_expiry_alarm(self, minutes_after_activation=None): + # try: + # with PdmLock(0): + # if minutes_after_activation is None: + # request = request_clear_pod_expiry_alert() + # else: + # request = request_set_pod_expiry_alert(minutes_after_activation) + # self.send_request(request, with_nonce=True) + # self.pod.var_alert_low_reservoir_set = True + # except OmnipyError: + # raise + # except Exception as e: + # raise PdmError("Unexpected error") from e + def hf_silence_will_fall(self): try: - with PdmLock(0): - return self._is_bolus_running() - except PdmBusyError: - return True - except OmnipyError: - raise + with PdmLock(): + self._internal_update_status() + self._assert_immediate_bolus_not_active() + if self.pod.state_alert > 0: + self.logger.info("Acknowledging alerts with bitmask %d" % self.pod.state_alert) + self.pod.last_command = {"command": "ACK_ALERTS", "mask": self.pod.state_alert, "success": False} + request = request_acknowledge_alerts(self.pod.state_alert) + self.send_request(request, with_nonce=True) + self.pod.last_command = {"command": "ACK_ALERTS", "mask": self.pod.state_alert, "success": False} + + self._internal_update_status(1) + + active_alerts = [] + if 
self.pod.state_alerts is not None: + for ai in range(0,8): + if self.pod.state_alerts[ai] > 0: + active_alerts.append(ai) + + if len(active_alerts) == 0: + self.logger.info("No alerts active") + else: + self.logger.info("Clearing alerts: %s" % str(active_alerts)) + acs = [] + for i in active_alerts: + ac = AlertConfiguration() + ac.activate = False + ac.alert_after_minutes = 0 + ac.alert_duration = 0 + ac.alert_index = i + acs.append(ac) + request = request_acknowledge_alerts(self.pod.state_alert) + self.send_request(request, with_nonce=True) + self.pod.last_command["success"] = True + + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.hf_silence_will_fall() except Exception as e: raise PdmError("Unexpected error") from e + finally: + self._savePod() - def bolus(self, bolus_amount): + def is_busy(self): + return False + + def bolus(self, bolus_amount, pulse_interval=2): try: with PdmLock(): + self.pod.last_command = {"command": "BOLUS", "units": bolus_amount, "interval": pulse_interval, "success": False} + self._assert_pod_address_assigned() - self._internal_update_status() self._assert_can_generate_nonce() + self._internal_update_status() self._assert_immediate_bolus_not_active() self._assert_not_faulted() self._assert_status_running() @@ -155,21 +250,25 @@ def bolus(self, bolus_amount): if self._is_bolus_running(): raise PdmError("A previous bolus is already running") - if bolus_amount > self.pod.insulin_reservoir: - raise PdmError("Cannot bolus %.2f units, insulin_reservoir capacity is at: %.2f") + pulse_interval = int(pulse_interval) + if pulse_interval < 2 or pulse_interval * 20 * bolus_amount > 1800: + raise PdmError("Cannot bolus at this interval for this amount") self.logger.debug("Bolusing %0.2f" % float(bolus_amount)) - request = request_bolus(bolus_amount) + request = request_bolus(bolus_amount, pulse_interval) self.send_request(request, with_nonce=True) if self.pod.state_bolus != 
BolusState.Immediate: raise PdmError("Pod did not confirm bolus") - self.pod.last_enacted_bolus_start = time.time() + self.pod.last_enacted_bolus_start = self.get_time() self.pod.last_enacted_bolus_amount = float(bolus_amount) - - except OmnipyError: - raise + self.pod.last_enacted_bolus_pulse_interval = pulse_interval + self.pod.last_command["success"] = True + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.bolus(bolus_amount) except Exception as e: raise PdmError("Unexpected error") from e finally: @@ -179,25 +278,35 @@ def bolus(self, bolus_amount): def cancel_bolus(self): try: with PdmLock(): + self.logger.debug("Canceling bolus") + self.pod.last_command = {"command": "BOLUS_CANCEL", "canceled": 0, "success": False} self._assert_pod_address_assigned() self._assert_can_generate_nonce() + self._internal_update_status() self._assert_not_faulted() self._assert_status_running() if self._is_bolus_running(): - self.logger.debug("Canceling running bolus") + pi = self.pod.last_enacted_bolus_pulse_interval + if pi is not None and pi > 0: + if self.pod.insulin_canceled * 20 * pi < 16: + raise PdmError("Too close to cancel") request = request_cancel_bolus() self.send_request(request, with_nonce=True) if self.pod.state_bolus == BolusState.Immediate: raise PdmError("Failed to cancel bolus") else: self.pod.last_enacted_bolus_amount = float(-1) - self.pod.last_enacted_bolus_start = time.time() + self.pod.last_enacted_bolus_start = self.get_time() + self.pod.last_command["success"] = True + self.pod.last_command["canceled"] = self.pod.insulin_canceled else: raise PdmError("Bolus is not running") - except OmnipyError: - raise + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.cancel_bolus() except Exception as e: raise PdmError("Unexpected error") from e finally: @@ -206,28 +315,33 @@ def cancel_bolus(self): def 
cancel_temp_basal(self): try: with PdmLock(): + self.logger.debug("Canceling temp basal") + self.pod.last_command = {"command": "TEMPBASAL_CANCEL", "success": False} + self._assert_pod_address_assigned() - self._internal_update_status() self._assert_can_generate_nonce() + self._internal_update_status() self._assert_immediate_bolus_not_active() self._assert_not_faulted() self._assert_status_running() if self._is_temp_basal_active(): - self.logger.debug("Canceling temp basal") request = request_cancel_temp_basal() self.send_request(request, with_nonce=True) if self.pod.state_basal == BasalState.TempBasal: raise PdmError("Failed to cancel temp basal") else: self.pod.last_enacted_temp_basal_duration = float(-1) - self.pod.last_enacted_temp_basal_start = time.time() + self.pod.last_enacted_temp_basal_start = self.get_time() self.pod.last_enacted_temp_basal_amount = float(-1) + self.pod.last_command["success"] = True else: self.logger.warning("Cancel temp basal received, while temp basal was not active. 
Ignoring.") - except OmnipyError: - raise + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.cancel_temp_basal() except Exception as e: raise PdmError("Unexpected error") from e finally: @@ -236,9 +350,15 @@ def cancel_temp_basal(self): def set_temp_basal(self, basalRate, hours, confidenceReminder=False): try: with PdmLock(): + self.logger.debug("Setting temp basal %02.2fU/h for %02.1fh"% (float(basalRate), float(hours))) + self.pod.last_command = {"command": "TEMPBASAL", + "duration_hours": hours, + "hourly_rate": basalRate, + "success": False} + self._assert_pod_address_assigned() - self._internal_update_status() self._assert_can_generate_nonce() + self._internal_update_status() self._assert_immediate_bolus_not_active() self._assert_not_faulted() self._assert_status_running() @@ -249,7 +369,7 @@ def set_temp_basal(self, basalRate, hours, confidenceReminder=False): if self.pod.var_maximum_temp_basal_rate is not None and \ basalRate > Decimal(self.pod.var_maximum_temp_basal_rate): raise PdmError("Requested rate exceeds maximum temp basal setting") - if basalRate > Decimal(30): + if basalRate > Decimal(45): raise PdmError("Requested rate exceeds maximum temp basal capability") if self._is_temp_basal_active(): @@ -258,7 +378,7 @@ def set_temp_basal(self, basalRate, hours, confidenceReminder=False): self.send_request(request, with_nonce=True) if self.pod.state_basal == BasalState.TempBasal: raise PdmError("Failed to cancel running temp basal") - self.logger.debug("Setting temp basal %02.2fU/h for %02.1fh"% (float(basalRate), float(hours))) + request = request_temp_basal(basalRate, hours) self.send_request(request, with_nonce=True) @@ -266,11 +386,14 @@ def set_temp_basal(self, basalRate, hours, confidenceReminder=False): raise PdmError("Failed to set temp basal") else: self.pod.last_enacted_temp_basal_duration = float(hours) - self.pod.last_enacted_temp_basal_start = time.time() + 
self.pod.last_enacted_temp_basal_start = self.get_time() self.pod.last_enacted_temp_basal_amount = float(basalRate) + self.pod.last_command["success"] = True - except OmnipyError: - raise + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.set_temp_basal(basalRate=basalRate, hours=hours) except Exception as e: raise PdmError("Unexpected error") from e finally: @@ -279,9 +402,13 @@ def set_temp_basal(self, basalRate, hours, confidenceReminder=False): def set_basal_schedule(self, schedule): try: with PdmLock(): + self.logger.debug("Setting basal schedule: %s"% schedule) + self.pod.last_command = {"command": "BASALSCHEDULE", + "hourly_rates": schedule, + "success": False} self._assert_pod_address_assigned() - self._internal_update_status() self._assert_can_generate_nonce() + self._internal_update_status() self._assert_immediate_bolus_not_active() self._assert_not_faulted() self._assert_status_running() @@ -291,7 +418,8 @@ def set_basal_schedule(self, schedule): self._assert_basal_schedule_is_valid(schedule) - pod_date = datetime.utcnow() + timedelta(minutes=self.pod.var_utc_offset) + pod_date = datetime.utcnow() + timedelta(minutes=self.pod.var_utc_offset) \ + + timedelta(seconds=self.time_adjustment) hours = pod_date.hour minutes = pod_date.minute @@ -304,9 +432,12 @@ def set_basal_schedule(self, schedule): raise PdmError("Failed to set basal schedule") else: self.pod.var_basal_schedule = schedule + self.pod.last_command["success"] = True - except OmnipyError: - raise + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.set_basal_schedule(schedule) except Exception as e: raise PdmError("Unexpected error") from e finally: @@ -315,144 +446,245 @@ def set_basal_schedule(self, schedule): def deactivate_pod(self): try: with PdmLock(): + self._assert_immediate_bolus_not_active() + self.logger.debug("Deactivating pod") + 
self.pod.last_command = {"command": "DEACTIVATE", "success": False} self._internal_update_status() self._assert_can_deactivate() - self.logger.debug("Deactivating pod") request = request_deactivate() self.send_request(request, with_nonce=True) - except OmnipyError: - raise + if self.pod.state_progress != PodProgress.Inactive: + raise PdmError("Failed to deactivate") + else: + self.pod.last_command["success"] = True + + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.deactivate_pod() except Exception as e: raise PdmError("Unexpected error") from e finally: self._savePod() - def activate_pod(self, candidate_address, utc_offset): + def pair_pod(self, candidate_address, utc_offset): try: with PdmLock(): - self.pod.radio_address = 0xffffffff - self.pod.var_utc_offset = utc_offset - - self._assert_pod_activate_can_start() - - - radio = self.get_radio(new=True) - - radio.radio_address = 0xffffffff + self.logger.debug("Activating pod") + self.pod.last_command = {"command": "PAIR", + "address": candidate_address, + "utc_offset": utc_offset, + "success": False} - request = request_assign_address(candidate_address) - response = self.get_radio().send_message_get_message(request, message_address=0xffffffff, - ack_address_override=candidate_address, - tx_power=TxPower.Lowest) - response_parse(response, self.pod) + # if self.pod.state_progress > PodProgress.PairingSuccess: + # raise PdmError("Pod is already paired") - self._assert_pod_can_activate() - - self.pod.var_activation_date = time.time() - pod_date = datetime.utcfromtimestamp(self.pod.var_activation_date) \ - + timedelta(minutes=self.pod.var_utc_offset) - - year = pod_date.year - month = pod_date.month - day = pod_date.day - hour = pod_date.hour - minute = pod_date.minute - - radio.message_sequence = 1 - request = request_setup_pod(self.pod.id_lot, self.pod.id_t, candidate_address, - year, month, day, hour, minute) - response = 
self.get_radio().send_message_get_message(request, message_address=0xffffffff, - ack_address_override=candidate_address, - tx_power=TxPower.Lowest) - response_parse(response, self.pod) + self.pod.var_utc_offset = utc_offset + radio = None - self._assert_pod_paired() + if self.pod.state_progress is None or \ + self.pod.state_progress < PodProgress.TankFillCompleted: - pkt_seq_saved = radio.packet_sequence - radio = self.get_radio(new=True) - radio.radio_address = candidate_address - radio.message_sequence = 2 - radio.packet_sequence = pkt_seq_saved + self.pod.radio_address = 0xffffffff - self.pod.nonce_seed = 0 - self.pod.nonce_last = None + radio = self.get_radio(new=True) + radio.radio_address = 0xffffffff + request = request_assign_address(candidate_address) + response = self.get_radio().send_message_get_message(request, message_address=0xffffffff, + ack_address_override=candidate_address) + response_parse(response, self.pod) - if self.pod.var_alert_low_reservoir is not None: - request = request_set_low_reservoir_alert(self.pod.var_alert_low_reservoir) - self.send_request(request, with_nonce=True) + self._assert_pod_can_activate() + else: + self._internal_update_status() - request = request_set_generic_alert(5, 55) - self.send_request(request, with_nonce=True) + if self.pod.state_progress == PodProgress.TankFillCompleted: - request = request_delivery_flags(0, 0) - self.send_request(request, with_nonce=True) + self.pod.var_activation_date = self.get_time() + pod_date = datetime.utcfromtimestamp(self.pod.var_activation_date) \ + + timedelta(minutes=self.pod.var_utc_offset) - request = request_prime_cannula() - self.send_request(request, with_nonce=True) + year = pod_date.year + month = pod_date.month + day = pod_date.day + hour = pod_date.hour + minute = pod_date.minute - time.sleep(55) + if radio is None: + radio = self.get_radio(new=True) + radio.radio_address = 0xffffffff - # while self.pod.state_progress == PodProgress.Purging: - # time.sleep(5) - # 
self._internal_update_status() + radio.message_sequence = 1 - if self.pod.var_alert_replace_pod is not None: - request = request_set_pod_expiry_alert(self.pod.var_alert_replace_pod - self.pod.state_active_minutes) - self.send_request(request, with_nonce=True) - else: - self._internal_update_status() + request = request_setup_pod(self.pod.id_lot, self.pod.id_t, candidate_address, + year, month, day, hour, minute) + response = self.get_radio().send_message_get_message(request, message_address=0xffffffff, + ack_address_override=candidate_address, + tx_power=TxPower.Low) + response_parse(response, self.pod) + self._assert_pod_paired() - if self.pod.state_progress != PodProgress.ReadyForInjection: - raise PdmError("Pod did not reach ready for injection stage") + self.pod.last_command["success"] = True - except OmnipyError: - raise + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.pair_pod(candidate_address, utc_offset) except Exception as e: raise PdmError("Unexpected error") from e finally: self._savePod() - def inject_and_start(self, basal_schedule): + def activate_pod(self): try: with PdmLock(): - if self.pod.state_progress != PodProgress.ReadyForInjection: - raise PdmError("Pod is not at the injection stage") + self.logger.debug("Activating pod") + self.pod.last_command = {"command": "ACTIVATE", + "success": False} + radio = self.get_radio(new=True) + self._internal_update_status() - self._assert_basal_schedule_is_valid(basal_schedule) + # if self.pod.state_progress > PodProgress.ReadyForInjection: + # raise PdmError("Pod is already activated") + + if self.pod.state_progress == PodProgress.PairingSuccess: + if radio is not None: + self.pod.radio_packet_sequence = radio.packet_sequence + + + radio.radio_address = self.pod.radio_address + radio.message_sequence = 2 + + self.pod.nonce_seed = 0 + self.pod.nonce_last = None + + # if self.pod.var_alert_low_reservoir is not None: + # if not 
self.pod.var_alert_low_reservoir_set: + # request = request_set_low_reservoir_alert(self.pod.var_alert_low_reservoir) + # self.send_request(request, with_nonce=True, tx_power=TxPower.Low) + # self.pod.var_alert_low_reservoir_set = True + # + + ac = AlertConfiguration() + ac.activate = True + ac.alert_index = 7 + ac.alert_after_minutes = 5 + ac.alert_duration = 55 + ac.beep_type = BeepType.BipBeepFourTimes + ac.beep_repeat_type = BeepPattern.OnceEveryFiveMinutes + acs = [ac] + request = request_alert_setup(acs) + self.send_request(request, with_nonce=True) - pod_date = datetime.utcnow() + timedelta(minutes=self.pod.var_utc_offset) + # request = request_delivery_flags(0, 0) + # self.send_request(request, with_nonce=True) - hour = pod_date.hour - minute = pod_date.minute - second = pod_date.second + request = request_delivery_flags(0, 0) + self.send_request(request, with_nonce=True) - request = request_set_basal_schedule(basal_schedule, hour, minute, second) - self.send_request(request, with_nonce=True, double_take=True, expect_critical_follow_up=True) + request = request_prime_cannula() + self.send_request(request, with_nonce=True) - if self.pod.state_progress != PodProgress.BasalScheduleSet: - raise PdmError("Pod did not acknowledge basal schedule") + time.sleep(55) - request = request_set_initial_alerts(self.pod.var_activation_date) - self.send_request(request, with_nonce=True, expect_critical_follow_up=True) + self._internal_update_status() + while self.pod.state_progress < PodProgress.ReadyForInjection: + time.sleep(5) + self._internal_update_status() + # if self.pod.state_progress == PodProgress.ReadyForInjection: + # if self.pod.var_alert_replace_pod is not None: + # if not self.pod.var_alert_replace_pod_set: + # request = request_set_pod_expiry_alert(self.pod.var_alert_replace_pod - self.pod.state_active_minutes) + # self.send_request(request, with_nonce=True, tx_power=TxPower.Low) + # self.pod.var_alert_replace_pod_set = True - request = 
request_insert_cannula() - self.send_request(request, with_nonce=True) + self.pod.last_command["success"] = True - if self.pod.state_progress != PodProgress.Inserting: - raise PdmError("Pod did not acknowledge cannula insertion start") + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.activate_pod() + except Exception as e: + raise PdmError("Unexpected error") from e + finally: + self._savePod() - time.sleep(12) + def inject_and_start(self, basal_schedule): + try: + with PdmLock(): + self.logger.debug("Starting pod") + self.pod.last_command = {"command": "START", + "hourly_rates": basal_schedule, + "success": False} self._internal_update_status() + if self.pod.state_progress >= PodProgress.Running: + raise PdmError("Pod has passed the injection stage") + + if self.pod.state_progress < PodProgress.ReadyForInjection: + raise PdmError("Pod is not ready for injection") + + if self.pod.state_progress == PodProgress.ReadyForInjection: + self._assert_basal_schedule_is_valid(basal_schedule) + + pod_date = datetime.utcnow() + timedelta(minutes=self.pod.var_utc_offset) \ + + timedelta(seconds=self.time_adjustment) + + hour = pod_date.hour + minute = pod_date.minute + second = pod_date.second + + request = request_set_basal_schedule(basal_schedule, hour, minute, second) + self.send_request(request, with_nonce=True, double_take=True, expect_critical_follow_up=True) + + if self.pod.state_progress != PodProgress.BasalScheduleSet: + raise PdmError("Pod did not acknowledge basal schedule") + + if self.pod.state_progress == PodProgress.BasalScheduleSet: + # if not self.pod.var_alert_after_prime_set: + + ac1 = AlertConfiguration() + ac1.activate = False + ac1.alert_index = 7 + ac1.alert_duration = 0 + ac1.alert_after_minutes = 0 + ac1.beep_type = 0 + ac1.beep_repeat_type = 0 + + ac2 = AlertConfiguration() + ac2.activate = False + ac2.alert_index = 0 + ac2.trigger_auto_off = True + ac2.duration = 15 + 
ac2.beep_repeat_type = 2 + ac2.beep_type = 2 + ac2.alert_duration = 0 + + acs = [ac1, ac2] + request = request_alert_setup(acs) + self.send_request(request, with_nonce=True) + + request = request_insert_cannula() + self.send_request(request, with_nonce=True) - if self.pod.state_progress != PodProgress.Running: - raise PdmError("Pod did not get to running state") + if self.pod.state_progress != PodProgress.Inserting: + raise PdmError("Pod did not acknowledge cannula insertion start") - except OmnipyError: - raise + if self.pod.state_progress == PodProgress.Inserting: + time.sleep(13) + self._internal_update_status() + if self.pod.state_progress != PodProgress.Running: + raise PdmError("Pod did not get to running state") + self.pod.var_insertion_date = self.get_time() + self.pod.last_command["success"] = True + + except StatusUpdateRequired: + self.logger.info("Requesting status update first") + self._internal_update_status(1) + self.inject_and_start(basal_schedule) except Exception as e: raise PdmError("Unexpected error") from e finally: @@ -470,58 +702,17 @@ def _savePod(self): self.pod.nonce_last = nonce.lastNonce self.pod.nonce_seed = nonce.seed - self.pod.Save() - self.logger.debug("Saved pod status") + return self.pod.Save() except Exception as e: raise PdmError("Pod status was not saved") from e - def _is_bolus_running(self): - if self.pod.state_last_updated is not None and self.pod.state_bolus != BolusState.Immediate: - return False - - if self.pod.last_enacted_bolus_amount is not None \ - and self.pod.last_enacted_bolus_start is not None: - - if self.pod.last_enacted_bolus_amount < 0: - return False - - now = time.time() - bolus_end_earliest = (self.pod.last_enacted_bolus_amount * 35) + self.pod.last_enacted_bolus_start - bolus_end_latest = (self.pod.last_enacted_bolus_amount * 45) + 10 + self.pod.last_enacted_bolus_start - if now > bolus_end_latest: - return False - elif now < bolus_end_earliest: - return True - - self._internal_update_status() + def 
_is_bolus_running(self, no_live_check=False): return self.pod.state_bolus == BolusState.Immediate def _is_basal_schedule_active(self): - if self.pod.state_last_updated is not None and self.pod.state_basal == BasalState.NotRunning: - return False - - self._internal_update_status() return self.pod.state_basal == BasalState.Program def _is_temp_basal_active(self): - if self.pod.state_last_updated is not None and self.pod.state_basal != BasalState.TempBasal: - return False - - if self.pod.last_enacted_temp_basal_start is not None \ - and self.pod.last_enacted_temp_basal_duration is not None: - if self.pod.last_enacted_temp_basal_amount < 0: - return False - now = time.time() - temp_basal_end_earliest = self.pod.last_enacted_temp_basal_start + \ - (self.pod.last_enacted_temp_basal_duration * 3600) - 60 - temp_basal_end_latest = self.pod.last_enacted_temp_basal_start + \ - (self.pod.last_enacted_temp_basal_duration * 3660) + 60 - if now > temp_basal_end_latest: - return False - elif now < temp_basal_end_earliest: - return True - - self._internal_update_status() return self.pod.state_basal == BasalState.TempBasal def _assert_pod_activate_can_start(self): @@ -568,7 +759,8 @@ def _assert_pod_can_activate(self): raise PdmError("Pod is not at the expected state of Tank Fill Completed") def _assert_pod_paired(self): - if self.pod.radio_address is None: + if self.pod.radio_address is None or self.pod.radio_address == 0 \ + or self.pod.radio_address == 0xffffffff: raise PdmError("Radio radio_address not accepted") if self.pod.state_progress != PodProgress.PairingSuccess: @@ -622,4 +814,8 @@ def _assert_immediate_bolus_not_active(self): if self._is_bolus_running(): raise PdmError("Pod is busy delivering a bolus") + def set_time_adjustment(self, adjustment): + self.time_adjustment = adjustment + def get_time(self): + return time.time() + self.time_adjustment \ No newline at end of file diff --git a/podcomm/pod.py b/podcomm/pod.py index 7fd747f..a13d3b8 100644 --- a/podcomm/pod.py 
+++ b/podcomm/pod.py @@ -1,10 +1,13 @@ from .definitions import * import simplejson as json -from datetime import datetime - +import time +import sqlite3 class Pod: def __init__(self): + self.db_migrated = False + + self.pod_id = None self.id_lot = None self.id_t = None self.id_version_pm = None @@ -27,7 +30,8 @@ def __init__(self): self.state_basal = BasalState.NotRunning self.state_bolus = BolusState.NotRunning self.state_alert = 0 - self.state_active_minutes=0 + self.state_alerts = None + self.state_active_minutes = 0 self.state_faulted = False self.var_maximum_bolus = None @@ -55,40 +59,65 @@ def __init__(self): self.insulin_delivered = 0 self.insulin_canceled = 0 - self.var_utc_offset=None - self.var_activation_date=None + self.var_utc_offset = None + self.var_activation_date = None + self.var_insertion_date = None + self.path = None - self.log_file_path = None + self.path_db = None + self.last_command = None + self.last_command_db_id = None + self.last_command_db_ts = None self.last_enacted_temp_basal_start = None self.last_enacted_temp_basal_duration = None self.last_enacted_temp_basal_amount = None - self.last_enacted_bolus_start = None self.last_enacted_bolus_amount = None + self.last_enacted_bolus_pulse_interval = None + def Save(self, save_as = None): + self._fix_pod_id() + if save_as is not None: - self.path = save_as - self.log_file_path = save_as + POD_LOG_SUFFIX + self.path = save_as + POD_FILE_SUFFIX + self.path_db = save_as + POD_DB_SUFFIX + if self.path is None: - raise ValueError("No filename given") - with open(self.path, "w") as stream: - json.dump(self.__dict__, stream, indent=4, sort_keys=True) + self.path = POD_FILE + POD_FILE_SUFFIX + self.path_db = POD_FILE + POD_DB_SUFFIX + try: + self.last_command_db_id, self.last_command_db_ts = self.log() + except: + pass + + try: + with open(self.path, "w") as stream: + json.dump(self.__dict__, stream, indent=4, sort_keys=True) + except: + pass + + def GetString(self): + return 
json.dumps(self.__dict__, indent=4, sort_keys=True) @staticmethod - def Load(path, log_file_path=None): - if log_file_path is None: - log_file_path = path + POD_LOG_SUFFIX + def Load(path, db_path=None): + + if db_path is None: + db_path = POD_FILE + POD_DB_SUFFIX with open(path, "r") as stream: d = json.load(stream) p = Pod() p.path = path - p.log_file_path = log_file_path + p.path_db = db_path p.id_lot = d.get("id_lot", None) p.id_t = d.get("id_t", None) + p.pod_id = d.get("pod_id", None) + p._fix_pod_id() + p.id_version_pm = d.get("id_version_pm", None) p.id_version_pi = d.get("id_version_pi", None) p.id_version_unknown_byte = d.get("id_version_unknown_byte", None) @@ -126,19 +155,23 @@ def Load(path, log_file_path=None): p.nonce_seed = d.get("nonce_seed", None) p.nonce_syncword = d.get("nonce_syncword", None) + p.last_command = d.get("last_command", None) p.last_enacted_temp_basal_start = d.get("last_enacted_temp_basal_start", None) p.last_enacted_temp_basal_duration = d.get("last_enacted_temp_basal_duration", None) p.last_enacted_temp_basal_amount = d.get("last_enacted_temp_basal_amount", None) p.last_enacted_bolus_start = d.get("last_enacted_bolus_start", None) p.last_enacted_bolus_amount = d.get("last_enacted_bolus_amount", None) + p.last_enacted_bolus_pulse_interval = d.get("last_enacted_bolus_pulse_interval", None) p.var_utc_offset = d.get("var_utc_offset", None) p.var_activation_date = d.get("var_activation_date", None) + p.var_insertion_date = d.get("var_insertion_date", None) p.var_basal_schedule = d.get("var_basal_schedule", None) p.var_maximum_bolus = d.get("var_maximum_bolus", None) p.var_maximum_temp_basal_rate = d.get("var_maximum_temp_basal_rate", None) p.var_alert_low_reservoir = d.get("var_alert_low_reservoir", None) p.var_alert_replace_pod = d.get("var_alert_replace_pod", None) + p.var_notify_bolus_start = d.get("var_notify_bolus_start", None) p.var_notify_bolus_cancel = d.get("var_notify_bolus_cancel", None) p.var_notify_temp_basal_set = 
d.get("var_notify_temp_basal_set", None) @@ -152,22 +185,85 @@ def is_active(self): and (self.state_progress == PodProgress.Running or self.state_progress == PodProgress.RunningLow) \ and not self.state_faulted - def _save_with_log(self, original_request): - ds = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] - orq = "----" - if original_request is not None: - orq = original_request - - self.Save() - self.log(original_request) def __str__(self): + self._fix_pod_id() return json.dumps(self.__dict__, indent=4, sort_keys=True) - def log(self, log_message): + def _get_conn(self): + return sqlite3.connect(self.path_db) + + def _ensure_db_structure(self): + if self.db_migrated: + return + + with self._get_conn() as conn: + try: + sql = "PRAGMA journal_mode=WAL;" + conn.execute(sql) + + sql = """ CREATE TABLE IF NOT EXISTS pod_history ( + timestamp real, + pod_state integer, pod_minutes integer, pod_last_command text, + insulin_delivered real, insulin_canceled real, insulin_reservoir real, pod_json text + ) """ + conn.execute(sql) + sql = "ALTER TABLE pod_history ADD COLUMN pod_json text" + conn.execute(sql) + except: + pass + + self.db_migrated = True + + def log(self): try: - with open(self.log_file_path, "a") as stream: - stream.write(json.dumps(self.__dict__, sort_keys=True)) - except Exception as e: - getLogger().warning("Failed to write the following line to the pod log file %s:\n%s\nError: %s" - %(self.log_file_path, log_message, e)) + self._fix_pod_id() + self._ensure_db_structure() + with self._get_conn() as conn: + sql = """ INSERT INTO pod_history (timestamp, pod_state, pod_minutes, pod_last_command, + insulin_delivered, insulin_canceled, insulin_reservoir, pod_json) + VALUES(?,?,?,?,?,?,?,?) 
""" + + ts = time.time() + values = (ts, self.state_progress, self.state_active_minutes, + str(self.last_command), self.insulin_delivered, self.insulin_canceled, self.insulin_reservoir, + json.dumps(self.__dict__, indent=4, sort_keys=True)) + + c = conn.cursor() + c.execute(sql, values) + return c.lastrowid, ts + except: + getLogger().exception("Error while writing to database") + + def get_bolus_total(self): + total_bolus = 0.0 + last_bolus_time = 0.0 + self._ensure_db_structure() + with self._get_conn() as conn: + cursor = conn.execute("SELECT pod_json, timestamp FROM pod_history WHERE pod_json IS NOT NULL AND pod_state >= 8 ORDER BY timestamp") + for row in cursor: + js = json.loads(row[0]) + if js["last_command"]["command"] == "BOLUS": + if js["last_command"]["success"]: + total_bolus += float(js["insulin_canceled"]) + last_bolus_time = float(row[1]) + cursor.close() + return total_bolus, last_bolus_time + + def get_history(self): + try: + self._ensure_db_structure() + # with self._get_conn() as conn: + # sql = "SELECT rowid, timestamp, pod_state, pod_minutes, pod_last_command," \ + # " insulin_delivered, insulin_canceled, insulin_reservoir FROM pod_history ORDER BY rowid" + # + # with conn.cursor() as c: + # for row in c.fetchall(): + # print(row[4]) + except: + getLogger().exception("Error while writing to database") + + def _fix_pod_id(self): + if self.pod_id is None: + if self.id_t is not None and self.id_lot is not None: + self.pod_id = "L" + str(self.id_lot) + "T" + str(self.id_t) diff --git a/podcomm/pr_rileylink.py b/podcomm/pr_rileylink.py index 559f1f5..b0ce59f 100644 --- a/podcomm/pr_rileylink.py +++ b/podcomm/pr_rileylink.py @@ -1,5 +1,4 @@ import re -import subprocess import struct import time from .packet_radio import PacketRadio, TxPower @@ -7,7 +6,8 @@ from enum import IntEnum from threading import Event from .exceptions import PacketRadioError - +from .manchester import ManchesterCodec +import simplejson as json from bluepy.btle import 
Peripheral, Scanner, BTLEException XGATT_BATTERYSERVICE_UUID = "180f" @@ -39,55 +39,69 @@ class Response(IntEnum): COMMAND_INTERRUPTED = 0xbb COMMAND_SUCCESS = 0xdd - -class Register(IntEnum): - SYNC1 = 0x00 - SYNC0 = 0x01 - PKTLEN = 0x02 - PKTCTRL1 = 0x03 - PKTCTRL0 = 0x04 - FSCTRL1 = 0x07 - FREQ2 = 0x09 - FREQ1 = 0x0a - FREQ0 = 0x0b - MDMCFG4 = 0x0c - MDMCFG3 = 0x0d - MDMCFG2 = 0x0e - MDMCFG1 = 0x0f - MDMCFG0 = 0x10 - DEVIATN = 0x11 - MCSM0 = 0x14 - FOCCFG = 0x15 - AGCCTRL2 = 0x17 - AGCCTRL1 = 0x18 - AGCCTRL0 = 0x19 - FREND1 = 0x1a - FREND0 = 0x1b - FSCAL3 = 0x1c - FSCAL2 = 0x1d - FSCAL1 = 0x1e - FSCAL0 = 0x1f - TEST1 = 0x24 - TEST0 = 0x25 - PATABLE0 = 0x2e - +def get_fw_reg_id(reg: str) -> int: + reg_dict = { + "SYNC1": 0, + "SYNC0": 1, + "PKTLEN": 2, + "PKTCTRL1": 3, + "PKTCTRL0": 4, + "ADDR": 5, + "CHANNR": 6, + "FSCTRL1": 7, + "FSCTRL0": 8, + "FREQ2": 9, + "FREQ1": 10, + "FREQ0": 11, + "MDMCFG4": 12, + "MDMCFG3": 13, + "MDMCFG2": 14, + "MDMCFG1": 15, + "MDMCFG0": 16, + "DEVIATN": 17, + "MCSM2": 18, + "MCSM1": 19, + "MCSM0": 20, + "FOCCFG": 21, + "BSCFG": 22, + "AGCCTRL2": 23, + "AGCCTRL1": 24, + "AGCCTRL0": 25, + "FREND1": 26, + "FREND0": 27, + "FSCAL3": 28, + "FSCAL2": 29, + "FSCAL1": 30, + "FSCAL0": 31, + "TEST2": None, + "TEST1": 36, + "TEST0": 37, + "PA_TABLE7": None, + "PA_TABLE6": None, + "PA_TABLE5": None, + "PA_TABLE4": None, + "PA_TABLE3": None, + "PA_TABLE2": None, + "PA_TABLE1": None, + "PA_TABLE0": 46 + } + return reg_dict[reg] class Encoding(IntEnum): NONE = 0 MANCHESTER = 1 FOURBSIXB = 2 - -PA_LEVELS = [0x12, - 0x0E, 0x0E, - 0x1D, 0x1D, - 0x34, 0x34, 0x34, - 0x2C, 0x2C, 0x2C, 0x2C, - 0x60, 0x60, 0x60, 0x60, - 0x84, 0x84, 0x84, 0x84, 0x84, - 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, - 0xC0, 0xC0] - +# 0xC0 +10 +# 0xC8 +7 +# 0x84 +5 +# 0x60 0 +# 0x62 -1 +# 0x2C -5 +# 0x34 -10 +# 0x1D -15 +# 0x0E -20 +# 0x12 -30 g_rl_address = None g_rl_version = None @@ -97,7 +111,6 @@ class Encoding(IntEnum): class RileyLink(PacketRadio): def __init__(self): self.peripheral 
= None - self.pa_level_index = PA_LEVELS.index(0x84) self.data_handle = None self.logger = getLogger() self.packet_logger = get_packet_logger() @@ -106,25 +119,42 @@ def __init__(self): self.response_handle = None self.notify_event = Event() self.initialized = False + self.manchester = ManchesterCodec() + self.version = None def connect(self, force_initialize=False): try: - if self.address is None: - self.address = self._findRileyLink() + already_connected = self._connect_internal() + if not already_connected or force_initialize: + self.init_radio(force_initialize) - if self.peripheral is None: - self.peripheral = Peripheral() + except BTLEException as be: + if self.peripheral is not None: + self.disconnect() + raise PacketRadioError("Error while connecting") from be + except Exception as e: + raise PacketRadioError("Error while connecting") from e - try: - state = self.peripheral.getState() - if state == "conn": - return - except BTLEException: - pass + def _connect_internal(self): + try: + if self.peripheral is not None: + try: + state = self.peripheral.getState() + if state == "conn": + return True + except BTLEException: + pass + if self.address is None: + self.initialized = False + self.address = self._findRileyLink() + + self.peripheral = Peripheral() self._connect_retry(3) self.service = self.peripheral.getServiceByUUID(RILEYLINK_SERVICE_UUID) + self.peripheral = self.service.peripheral + data_char = self.service.getCharacteristics(RILEYLINK_DATA_CHAR_UUID)[0] self.data_handle = data_char.getHandle() @@ -134,14 +164,8 @@ def connect(self, force_initialize=False): response_notify_handle = self.response_handle + 1 notify_setup = b"\x01\x00" self.peripheral.writeCharacteristic(response_notify_handle, notify_setup) + return False - while self.peripheral.waitForNotifications(0.05): - self.peripheral.readCharacteristic(self.data_handle) - - if self.initialized: - self.init_radio(force_initialize) - else: - self.init_radio(True) except BTLEException as be: if 
self.peripheral is not None: self.disconnect() @@ -228,82 +252,59 @@ def _read_version(self): def init_radio(self, force_init=False): try: - version, v_major, v_minor = self._read_version() + if force_init: + self.initialized = False + self.logger.debug("force initialize, resetting RL") + self.peripheral.writeCharacteristic(self.data_handle, bytes([1, Command.RESET]), withResponse=False) + self.logger.debug("disconnecting") + self.disconnect() + time.sleep(3) + self.logger.debug("reconnecting") + self._connect_internal() + + if self.version is None: + self.version = self._read_version() + v_str, v_major, v_minor = self.version if v_major < 2: self.logger.error("Firmware version is below 2.0") - raise PacketRadioError("Unsupported RileyLink firmware %d.%d (%s)" % - (v_major, v_minor, version)) + raise PacketRadioError("Unsupported RileyLink firmware %s" % v_str) if not force_init: if v_major == 2 and v_minor < 3: - response = self._command(Command.READ_REGISTER, bytes([Register.SYNC1, 0x00])) + response = self._command(Command.READ_REGISTER, bytes([get_fw_reg_id("PKTLEN"), 0x00])) else: - response = self._command(Command.READ_REGISTER, bytes([Register.SYNC1])) - if response is not None and len(response) > 0 and response[0] == 0xA5: - return + response = self._command(Command.READ_REGISTER, bytes([get_fw_reg_id("PKTLEN")])) + if response is not None and len(response) > 0 and response[0] == 0x50: + self.initialized = True + return self._command(Command.RADIO_RESET_CONFIG) - self._command(Command.SET_SW_ENCODING, bytes([Encoding.MANCHESTER])) + self._command(Command.SET_SW_ENCODING, bytes([Encoding.NONE])) self._command(Command.SET_PREAMBLE, bytes([0x66, 0x65])) - frequency = int(433910000 / (24000000 / pow(2, 16))) - self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ0, frequency & 0xff])) - self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ1, (frequency >> 8) & 0xff])) - self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ2, (frequency >> 16) 
& 0xff])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ0, 0x5f])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ1, 0x14])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ2, 0x12])) - - self._command(Command.UPDATE_REGISTER, bytes([Register.DEVIATN, 0x44])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.DEVIATN, 0x44])) - - self._command(Command.UPDATE_REGISTER, bytes([Register.PKTCTRL1, 0x20])) - self._command(Command.UPDATE_REGISTER, bytes([Register.PKTCTRL0, 0x00])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.PKTCTRL1, 0x60])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.PKTCTRL0, 0x04])) - - self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL1, 0x06])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL1, 0x06])) - - self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG4, 0xCA])) - self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG3, 0xBC])) - self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG2, 0x06])) - self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG1, 0x70])) - self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG0, 0x11])) - self._command(Command.UPDATE_REGISTER, bytes([Register.MCSM0, 0x18])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG4, 0xDA])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG3, 0xB5])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG2, 0x12])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG1, 0x23])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG0, 0x11])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.MCSM0, 0x18])) - - self._command(Command.UPDATE_REGISTER, bytes([Register.FOCCFG, 0x17])) - self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL3, 0xE9])) - self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL2, 0x2A])) - 
self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL1, 0x00])) - self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL0, 0x1F])) - self._command(Command.UPDATE_REGISTER, bytes([Register.TEST1, 0x35])) - self._command(Command.UPDATE_REGISTER, bytes([Register.TEST0, 0x09])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FOCCFG, 0x17])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL3, 0xE9])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL2, 0x2A])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL1, 0x00])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL0, 0x1F])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.TEST2, 0x81])) ## register not defined on RL - # self._command(Command.UPDATE_REGISTER, bytes([Register.TEST1, 0x35])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.TEST0, 0x09])) - - - self._command(Command.UPDATE_REGISTER, bytes([Register.PATABLE0, PA_LEVELS[self.pa_level_index]])) - self._command(Command.UPDATE_REGISTER, bytes([Register.FREND0, 0x00])) - self._command(Command.UPDATE_REGISTER, bytes([Register.SYNC1, 0xA5])) - self._command(Command.UPDATE_REGISTER, bytes([Register.SYNC0, 0x5A])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.PATABLE0, PA_LEVELS[self.pa_level_index]])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.FREND0, 0x00])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.SYNC1, 0xA5])) - # self._command(Command.UPDATE_REGISTER, bytes([Register.SYNC0, 0x5A])) + with open("/home/pi/omnipy/cc1110.json", "r") as ocj: + js = json.load(ocj) + + common_regs = js['common'] + for reg in common_regs: + self._command(Command.UPDATE_REGISTER, bytes([get_fw_reg_id(reg), int(common_regs[reg], base=16)])) + + tx = js['tx'] + tx_mode = [0x01] + for reg in tx: + tx_mode.append(get_fw_reg_id(reg)) + tx_mode.append(int(tx[reg], base=16)) + tx_mode = bytes(tx_mode) + 
self._command(Command.SET_MODE_REGISTERS, tx_mode) + + rx = js['rx'] + rx_mode = [0x02] + for reg in rx: + rx_mode.append(get_fw_reg_id(reg)) + rx_mode.append(int(rx[reg], base=16)) + rx_mode = bytes(rx_mode) + self._command(Command.SET_MODE_REGISTERS, rx_mode) response = self._command(Command.GET_STATE) if response != b"OK": @@ -314,52 +315,48 @@ def init_radio(self, force_init=False): except Exception as e: raise PacketRadioError("Error while initializing rileylink radio: %s", e) + # def set_f(self, cf, ifb, of): + # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ2, cf >> 16 & 0xFF])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ1, cf >> 8 & 0xFF])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ0, cf & 0xFF])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL1, ifb])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL0, of & 0xFF])) + # self.freq_c = cf + # self.freq_if = ifb + # self.freq_of = of + # e_cf = cf*366.2109375 + # e_ifb = ifb*23437.5 + # e_of = of*1464.84375 + # e_rx = e_cf + e_ifb + e_of + # self.logger.debug(f"Setting cf: {cf}, if: {ifb}, of: {of}") + # self.logger.debug(f"Parameters TX: {e_cf:.0f} RX: {e_rx:.0f} (IF: {e_ifb:.0f} OF: {e_of:.0f})") + def tx_up(self): - try: - if self.pa_level_index < len(PA_LEVELS) - 1: - self.pa_level_index += 1 - self._set_amp(self.pa_level_index) - except Exception as e: - raise PacketRadioError("Error while setting tx up") from e + pass def tx_down(self): - try: - if self.pa_level_index > 0: - self.pa_level_index -= 1 - self._set_amp(self.pa_level_index) - except Exception as e: - raise PacketRadioError("Error while setting tx down") from e + pass def set_tx_power(self, tx_power): - try: - if tx_power is None: - return - elif tx_power == TxPower.Lowest: - self._set_amp(0) - elif tx_power == TxPower.Low: - self._set_amp(PA_LEVELS.index(0x12)) - elif tx_power == TxPower.Normal: - self._set_amp(PA_LEVELS.index(0x60)) - elif tx_power == 
TxPower.High: - self._set_amp(PA_LEVELS.index(0xC8)) - elif tx_power == TxPower.Highest: - self._set_amp(PA_LEVELS.index(0xC0)) - except Exception as e: - raise PacketRadioError("Error while setting tx level") from e + pass def get_packet(self, timeout=5.0): try: self.connect() - return self._command(Command.GET_PACKET, struct.pack(">BL", 0, int(timeout * 1000)), + result = self._command(Command.GET_PACKET, struct.pack(">BL", 0, int(timeout * 1000)), timeout=float(timeout)+0.5) + if result is not None: + return result[0:2] + self.manchester.decode(result[2:]) + else: + return None except Exception as e: raise PacketRadioError("Error while getting radio packet") from e def send_and_receive_packet(self, packet, repeat_count, delay_ms, timeout_ms, retry_count, preamble_ext_ms): - try: self.connect() - return self._command(Command.SEND_AND_LISTEN, + data = self.manchester.encode(packet) + result = self._command(Command.SEND_AND_LISTEN, struct.pack(">BBHBLBH", 0, repeat_count, @@ -368,35 +365,28 @@ def send_and_receive_packet(self, packet, repeat_count, delay_ms, timeout_ms, re timeout_ms, retry_count, preamble_ext_ms) - + packet, + + data, timeout=30) + if result is not None: + return result[0:2] + self.manchester.decode(result[2:]) + else: + return None except Exception as e: raise PacketRadioError("Error while sending and receiving data") from e def send_packet(self, packet, repeat_count, delay_ms, preamble_extension_ms): try: self.connect() + data = self.manchester.encode(packet) result = self._command(Command.SEND_PACKET, struct.pack(">BBHH", 0, repeat_count, delay_ms, - preamble_extension_ms) + packet, + preamble_extension_ms) + data, timeout=30) return result except Exception as e: raise PacketRadioError("Error while sending data") from e def _set_amp(self, index=None): - try: - if index is not None: - previous_level = self.pa_level_index - self.pa_level_index = index - if PA_LEVELS[previous_level] == PA_LEVELS[index]: - return - self.connect() - 
self._command(Command.UPDATE_REGISTER, bytes([Register.PATABLE0, PA_LEVELS[self.pa_level_index]]))
-            self.packet_logger.debug("Setting pa to %02X (%d of %d)" % (PA_LEVELS[self.pa_level_index], self.pa_level_index, len(PA_LEVELS)))
-        except PacketRadioError:
-            self.logger.exception("Error while setting tx amplification")
-            raise
-
+        pass
 
     def _findRileyLink(self):
         global g_rl_address
@@ -428,13 +418,14 @@ def _connect_retry(self, retries):
             except BTLEException as btlee:
                 self.logger.warning("BTLE exception trying to connect: %s" % btlee)
                 try:
-                    p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE)
-                    out, err = p.communicate()
-                    for line in out.splitlines():
-                        if "bluepy-helper" in line:
-                            pid = int(line.split(None, 1)[0])
-                            os.kill(pid, 9)
-                            break
+                    os.system("sudo killall -9 bluepy-helper")
+                    # p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE)
+                    # out, err = p.communicate()
+                    # for line in out.splitlines():
+                    #     if "bluepy-helper" in line:
+                    #         pid = int(line.split(None, 1)[0])
+                    #         os.kill(pid, 9)
+                    #         break
                 except:
                     self.logger.warning("Failed to kill bluepy-helper")
                 time.sleep(1)
diff --git a/podcomm/pr_rl2.py b/podcomm/pr_rl2.py
new file mode 100644
index 0000000..a887c77
--- /dev/null
+++ b/podcomm/pr_rl2.py
@@ -0,0 +1,484 @@
+import re
+import subprocess
+import struct
+import time
+from .packet_radio import PacketRadio, TxPower
+from .definitions import *
+from enum import IntEnum
+from threading import Event
+from .exceptions import PacketRadioError
+from .manchester import ManchesterCodec
+
+from bluepy.btle import Peripheral, Scanner, BTLEException
+
+XGATT_BATTERYSERVICE_UUID = "180f"
+XGATT_BATTERY_CHAR_UUID = "2a19"
+RILEYLINK_SERVICE_UUID = "0235733b-99c5-4197-b856-69219c2a3845"
+RILEYLINK_DATA_CHAR_UUID = "c842e849-5028-42e2-867c-016adada9155"
+RILEYLINK_RESPONSE_CHAR_UUID = "6e6c7910-b89e-43a5-a0fe-50c5e2b81f4a"
+
+class Command(IntEnum):
+    GET_STATE = 1
+    GET_VERSION = 2
+    GET_PACKET = 3
+    SEND_PACKET = 4
+    SEND_AND_LISTEN = 5
+ UPDATE_REGISTER = 6 + RESET = 7 + LED = 8 + READ_REGISTER = 9 + SET_MODE_REGISTERS = 10 + SET_SW_ENCODING = 11 + SET_PREAMBLE = 12 + RADIO_RESET_CONFIG = 13 + + +class Response(IntEnum): + PROTOCOL_SYNC = 0x00 + UNKNOWN_COMMAND = 0x22 + RX_TIMEOUT = 0xaa + COMMAND_INTERRUPTED = 0xbb + COMMAND_SUCCESS = 0xdd + + +class Register(IntEnum): + SYNC1 = 0x00 + SYNC0 = 0x01 + PKTLEN = 0x02 + PKTCTRL1 = 0x03 + PKTCTRL0 = 0x04 + ADDR = 0x05 + CHANNR = 0x06 + FSCTRL1 = 0x07 + FSCTRL0 = 0x08 + FREQ2 = 0x09 + FREQ1 = 0x0a + FREQ0 = 0x0b + MDMCFG4 = 0x0c + MDMCFG3 = 0x0d + MDMCFG2 = 0x0e + MDMCFG1 = 0x0f + MDMCFG0 = 0x10 + DEVIATN = 0x11 + MCSM2 = 0x12 + MCSM1 = 0x13 + MCSM0 = 0x14 + FOCCFG = 0x15 + BSCFG = 0x16 + AGCCTRL2 = 0x17 + AGCCTRL1 = 0x18 + AGCCTRL0 = 0x19 + FREND1 = 0x1a + FREND0 = 0x1b + FSCAL3 = 0x1c + FSCAL2 = 0x1d + FSCAL1 = 0x1e + FSCAL0 = 0x1f + TEST1 = 0x24 + TEST0 = 0x25 + PATABLE0 = 0x2e + + +class Encoding(IntEnum): + NONE = 0 + MANCHESTER = 1 + FOURBSIXB = 2 + +# 0xC0 +10 +# 0xC8 +7 +# 0x84 +5 +# 0x60 0 +# 0x62 -1 +# 0x2C -5 +# 0x34 -10 +# 0x1D -15 +# 0x0E -20 +# 0x12 -30 + +g_rl_address = None +g_rl_version = None +g_rl_v_major = None +g_rl_v_minor = None + +class RileyLink(PacketRadio): + def __init__(self): + self.peripheral = None + self.data_handle = None + self.logger = getLogger() + self.packet_logger = get_packet_logger() + self.address = g_rl_address + self.service = None + self.response_handle = None + self.notify_event = Event() + self.initialized = False + self.manchester = ManchesterCodec() + self.version = None + + def connect(self, force_initialize=False): + try: + already_connected = self._connect_internal() + if not already_connected or force_initialize: + self.init_radio(force_initialize) + + except BTLEException as be: + if self.peripheral is not None: + self.disconnect() + raise PacketRadioError("Error while connecting") from be + except Exception as e: + raise PacketRadioError("Error while connecting") from e + + def 
_connect_internal(self): + try: + if self.peripheral is not None: + try: + state = self.peripheral.getState() + if state == "conn": + return True + except BTLEException: + pass + + if self.address is None: + self.initialized = False + self.address = self._findRileyLink() + + self.peripheral = Peripheral() + self._connect_retry(3) + + self.service = self.peripheral.getServiceByUUID(RILEYLINK_SERVICE_UUID) + self.peripheral = self.service.peripheral + + data_char = self.service.getCharacteristics(RILEYLINK_DATA_CHAR_UUID)[0] + self.data_handle = data_char.getHandle() + + char_response = self.service.getCharacteristics(RILEYLINK_RESPONSE_CHAR_UUID)[0] + self.response_handle = char_response.getHandle() + + response_notify_handle = self.response_handle + 1 + notify_setup = b"\x01\x00" + self.peripheral.writeCharacteristic(response_notify_handle, notify_setup) + return False + + except BTLEException as be: + if self.peripheral is not None: + self.disconnect() + raise PacketRadioError("Error while connecting") from be + except Exception as e: + raise PacketRadioError("Error while connecting") from e + + def disconnect(self, ignore_errors=True): + try: + if self.peripheral is None: + self.logger.info("Already disconnected") + return + self.logger.info("Disconnecting..") + if self.response_handle is not None: + response_notify_handle = self.response_handle + 1 + notify_setup = b"\x00\x00" + self.peripheral.writeCharacteristic(response_notify_handle, notify_setup) + except Exception as e: + if not ignore_errors: + raise PacketRadioError("Error while disconnecting") from e + finally: + try: + if self.peripheral is not None: + self.peripheral.disconnect() + self.peripheral = None + except BTLEException as be: + if ignore_errors: + self.logger.exception("Ignoring btle exception during disconnect") + else: + raise PacketRadioError("Error while disconnecting") from be + except Exception as e: + raise PacketRadioError("Error while disconnecting") from e + + def get_info(self): + 
try: + self.connect() + bs = self.peripheral.getServiceByUUID(XGATT_BATTERYSERVICE_UUID) + bc = bs.getCharacteristics(XGATT_BATTERY_CHAR_UUID)[0] + bch = bc.getHandle() + battery_value = int(self.peripheral.readCharacteristic(bch)[0]) + self.logger.debug("Battery level read: %d", battery_value) + version, v_major, v_minor = self._read_version() + return { "battery_level": battery_value, "mac_address": self.address, + "version_string": version, "version_major": v_major, "version_minor": v_minor } + except Exception as e: + raise PacketRadioError("Error communicating with RileyLink") from e + finally: + self.disconnect() + + def _read_version(self): + global g_rl_version, g_rl_v_major, g_rl_v_minor + version = None + try: + if g_rl_version is not None: + return g_rl_version, g_rl_v_major, g_rl_v_minor + else: + response = self._command(Command.GET_VERSION) + if response is not None and len(response) > 0: + version = response.decode("ascii") + self.logger.debug("RL reports version string: %s" % version) + g_rl_version = version + + if version is None: + return "0.0", 0, 0 + + try: + m = re.search(".+([0-9]+)\\.([0-9]+)", version) + if m is None: + raise PacketRadioError("Failed to parse firmware version string: %s" % version) + + g_rl_v_major = int(m.group(1)) + g_rl_v_minor = int(m.group(2)) + self.logger.debug("Interpreted version major: %d minor: %d" % (g_rl_v_major, g_rl_v_minor)) + + return g_rl_version, g_rl_v_major, g_rl_v_minor + + except Exception as ex: + raise PacketRadioError("Failed to parse firmware version string: %s" % version) from ex + + except PacketRadioError: + raise + except Exception as e: + raise PacketRadioError("Error while reading version") from e + + def init_radio(self, force_init=False): + try: + if force_init: + self.initialized = False + self.logger.debug("force initialize, resetting RL") + self.peripheral.writeCharacteristic(self.data_handle, bytes([1, Command.RESET]), withResponse=False) + self.logger.debug("disconnecting") + 
self.disconnect() + time.sleep(3) + self.logger.debug("reconnecting") + self._connect_internal() + + if self.version is None: + self.version = self._read_version() + + v_str, v_major, v_minor = self.version + if v_major < 2: + self.logger.error("Firmware version is below 2.0") + raise PacketRadioError("Unsupported RileyLink firmware %s" % v_str) + + if not force_init: + if v_major == 2 and v_minor < 3: + response = self._command(Command.READ_REGISTER, bytes([Register.PKTLEN, 0x00])) + else: + response = self._command(Command.READ_REGISTER, bytes([Register.PKTLEN])) + if response is not None and len(response) > 0 and response[0] == 0x50: + self.initialized = True + return + + self._command(Command.RADIO_RESET_CONFIG) + self._command(Command.SET_SW_ENCODING, bytes([Encoding.NONE])) + self._command(Command.SET_PREAMBLE, bytes([0x66, 0x65])) + + self._command(Command.UPDATE_REGISTER, bytes([Register.SYNC1, 0xA5])) + self._command(Command.UPDATE_REGISTER, bytes([Register.SYNC0, 0x5A])) + self._command(Command.UPDATE_REGISTER, bytes([Register.PKTLEN, 0x50])) + self._command(Command.UPDATE_REGISTER, bytes([Register.PKTCTRL1, 0x20])) + self._command(Command.UPDATE_REGISTER, bytes([Register.PKTCTRL0, 0x00])) + self._command(Command.UPDATE_REGISTER, bytes([Register.ADDR, 0x00])) + self._command(Command.UPDATE_REGISTER, bytes([Register.CHANNR, 0x00])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL1, 0x0F])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL0, 0x00])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ2, 0x12])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ1, 0x14])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ0, 0x50])) + self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG4, 0xFA])) # CA + self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG3, 0xB9])) # BC + self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG2, 0x12])) # 02 + 
self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG1, 0x41])) # 40 + self._command(Command.UPDATE_REGISTER, bytes([Register.MDMCFG0, 0xF0])) # 11 + self._command(Command.UPDATE_REGISTER, bytes([Register.DEVIATN, 0x36])) # 54 + self._command(Command.UPDATE_REGISTER, bytes([Register.MCSM2, 0x07])) + self._command(Command.UPDATE_REGISTER, bytes([Register.MCSM1, 0x30])) + self._command(Command.UPDATE_REGISTER, bytes([Register.MCSM0, 0x19])) # 19 + self._command(Command.UPDATE_REGISTER, bytes([Register.FOCCFG, 0x00])) # 17 + self._command(Command.UPDATE_REGISTER, bytes([Register.BSCFG, 0x6C])) + self._command(Command.UPDATE_REGISTER, bytes([Register.AGCCTRL2, 0x43])) + self._command(Command.UPDATE_REGISTER, bytes([Register.AGCCTRL1, 0x40])) + self._command(Command.UPDATE_REGISTER, bytes([Register.AGCCTRL0, 0x91])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FREND1, 0x56])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FREND0, 0x10])) # 0x00 + self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL3, 0xE9])) # 0xEA + self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL2, 0x2A])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL1, 0x00])) + self._command(Command.UPDATE_REGISTER, bytes([Register.FSCAL0, 0x1F])) + #self._command(Command.UPDATE_REGISTER, bytes([Register.TEST2, 0x88])) + self._command(Command.UPDATE_REGISTER, bytes([Register.TEST1, 0x31])) # 0x35 + self._command(Command.UPDATE_REGISTER, bytes([Register.TEST0, 0x09])) + self._command(Command.UPDATE_REGISTER, bytes([Register.PATABLE0, 0x60])) # ?C8 + + tx_mode = bytes([0x01, + Register.FREQ2, 0x12, + Register.FREQ1, 0x14, + Register.FREQ0, 0x56, + ]) + + rx_mode = bytes([0x02, + Register.FREQ2, 0x12, + Register.FREQ1, 0x14, + Register.FREQ0, 0x71, + ]) + + # self._command(Command.SET_MODE_REGISTERS, tx_mode) + # self._command(Command.SET_MODE_REGISTERS, rx_mode) + + response = self._command(Command.GET_STATE) + if response != b"OK": + raise 
PacketRadioError("Rileylink state is not OK. Response returned: %s" % response) + + self.initialized = True + + except Exception as e: + raise PacketRadioError("Error while initializing rileylink radio: %s", e) + + # def set_f(self, cf, ifb, of): + # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ2, cf >> 16 & 0xFF])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ1, cf >> 8 & 0xFF])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FREQ0, cf & 0xFF])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL1, ifb])) + # self._command(Command.UPDATE_REGISTER, bytes([Register.FSCTRL0, of & 0xFF])) + # self.freq_c = cf + # self.freq_if = ifb + # self.freq_of = of + # e_cf = cf*366.2109375 + # e_ifb = ifb*23437.5 + # e_of = of*1464.84375 + # e_rx = e_cf + e_ifb + e_of + # self.logger.debug(f"Setting cf: {cf}, if: {ifb}, of: {of}") + # self.logger.debug(f"Parameters TX: {e_cf:.0f} RX: {e_rx:.0f} (IF: {e_ifb:.0f} OF: {e_of:.0f})") + + def tx_up(self): + pass + + def tx_down(self): + pass + + def set_tx_power(self, tx_power): + pass + + def get_packet(self, timeout=5.0): + try: + self.connect() + result = self._command(Command.GET_PACKET, struct.pack(">BL", 0, int(timeout * 1000)), + timeout=float(timeout)+0.5) + if result is not None: + return result[0:2] + self.manchester.decode(result[2:]) + else: + return None + except Exception as e: + raise PacketRadioError("Error while getting radio packet") from e + + def send_and_receive_packet(self, packet, repeat_count, delay_ms, timeout_ms, retry_count, preamble_ext_ms): + try: + self.connect() + data = self.manchester.encode(packet) + result = self._command(Command.SEND_AND_LISTEN, + struct.pack(">BBHBLBH", + 0, + repeat_count, + delay_ms, + 0, + timeout_ms, + retry_count, + preamble_ext_ms) + + data, + timeout=30) + if result is not None: + return result[0:2] + self.manchester.decode(result[2:]) + else: + return None + except Exception as e: + raise PacketRadioError("Error 
while sending and receiving data") from e + + def send_packet(self, packet, repeat_count, delay_ms, preamble_extension_ms): + try: + self.connect() + data = self.manchester.encode(packet) + result = self._command(Command.SEND_PACKET, struct.pack(">BBHH", 0, repeat_count, delay_ms, + preamble_extension_ms) + data, + timeout=30) + return result + except Exception as e: + raise PacketRadioError("Error while sending data") from e + + def _set_amp(self, index=None): + pass + + def _findRileyLink(self): + global g_rl_address + scanner = Scanner() + g_rl_address = None + self.logger.debug("Scanning for RileyLink") + retries = 10 + while g_rl_address is None and retries > 0: + retries -= 1 + for result in scanner.scan(1.0): + if result.getValueText(7) == RILEYLINK_SERVICE_UUID: + self.logger.debug("Found RileyLink") + g_rl_address = result.addr + + if g_rl_address is None: + raise PacketRadioError("Could not find RileyLink") + + return g_rl_address + + def _connect_retry(self, retries): + while retries > 0: + retries -= 1 + self.logger.info("Connecting to RileyLink, retries left: %d" % retries) + + try: + self.peripheral.connect(self.address) + self.logger.info("Connected") + break + except BTLEException as btlee: + self.logger.warning("BTLE exception trying to connect: %s" % btlee) + try: + os.system("sudo killall -9 bluepy-helper") + # p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) + # out, err = p.communicate() + # for line in out.splitlines(): + # if "bluepy-helper" in line: + # pid = int(line.split(None, 1)[0]) + # os.kill(pid, 9) + # break + except: + self.logger.warning("Failed to kill bluepy-helper") + time.sleep(1) + + def _command(self, command_type, command_data=None, timeout=10.0): + try: + if command_data is None: + data = bytes([1, command_type]) + else: + data = bytes([len(command_data) + 1, command_type]) + command_data + + self.peripheral.writeCharacteristic(self.data_handle, data, withResponse=True) + + if not 
self.peripheral.waitForNotifications(timeout): + raise PacketRadioError("Timed out while waiting for a response from RileyLink") + + response = self.peripheral.readCharacteristic(self.data_handle) + + if response is None or len(response) == 0: + raise PacketRadioError("RileyLink returned no response") + else: + if response[0] == Response.COMMAND_SUCCESS: + return response[1:] + elif response[0] == Response.COMMAND_INTERRUPTED: + self.logger.warning("A previous command was interrupted") + return response[1:] + elif response[0] == Response.RX_TIMEOUT: + return None + else: + raise PacketRadioError("RileyLink returned error code: %02X. Additional response data: %s" + % (response[0], response[1:]), response[0]) + except PacketRadioError: + raise + except Exception as e: + raise PacketRadioError("Error executing command") from e diff --git a/podcomm/protocol.py b/podcomm/protocol.py index 0a880e3..ca6edb2 100644 --- a/podcomm/protocol.py +++ b/podcomm/protocol.py @@ -1,4 +1,3 @@ -from podcomm.pod import Pod from podcomm.protocol_common import * from podcomm.definitions import * from enum import IntEnum @@ -30,106 +29,55 @@ def request_setup_pod(lot, tid, address, year, month, day, hour, minute): return PdmMessage(PdmRequest.SetupPod, cmd_body) -def request_set_low_reservoir_alert(iu_reservoir_level): - cmd_body = _alert_configuration_message(PodAlertBit.LowReservoir, - activate=True, - trigger_auto_off=False, - trigger_reservoir=True, - duration_minutes=60, - alert_after_reservoir=iu_reservoir_level, - beep_type=BeepType.BipBip, - beep_repeat_type=BeepPattern.OnceEveryHour) - return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) - - -def request_clear_low_reservoir_alert(): - cmd_body = _alert_configuration_message(PodAlertBit.LowReservoir, - activate=False, - trigger_auto_off=False, - trigger_reservoir=True, - duration_minutes=0, - alert_after_reservoir=0, - beep_type=BeepType.NoSound, - beep_repeat_type=BeepPattern.OnceEveryHour) - return 
PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) - - -def request_set_pod_expiry_alert(minutes_after_activation): - cmd_body = _alert_configuration_message(PodAlertBit.LowReservoir, - activate=True, - trigger_auto_off=False, - trigger_reservoir=False, - duration_minutes=60, - alert_after_minutes=minutes_after_activation, - beep_type=BeepType.BipBip, - beep_repeat_type=BeepPattern.OnceEveryHour) - return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) +def request_alert_setup(alert_configurations): + cmd_body = bytes() + for ac in alert_configurations: + if ac.alert_after_minutes is None and ac.alert_after_reservoir is None and ac.activate: + raise PdmError("Either alert_after_minutes or alert_after_reservoir must be set") + elif ac.alert_after_minutes is not None and ac.alert_after_reservoir is not None: + raise PdmError("Only one of alert_after_minutes or alert_after_reservoir must be set") -def request_clear_pod_expiry_alert(): - cmd_body = _alert_configuration_message(PodAlertBit.LowReservoir, - activate=False, - trigger_auto_off=False, - trigger_reservoir=False, - duration_minutes=0, - alert_after_minutes=0, - beep_type=BeepType.NoSound, - beep_repeat_type=BeepPattern.Once) - return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) - - -def request_set_generic_alert(minutes_after_set, repeat_interval): - cmd_body = _alert_configuration_message(PodAlertBit.TimerLimit, - activate=True, - trigger_auto_off=False, - duration_minutes=minutes_after_set, - alert_after_minutes=repeat_interval, - beep_repeat_type=BeepPattern.OnceEveryMinuteForThreeMinutesAndRepeatEveryFifteenMinutes, - beep_type=BeepType.BipBipBipTwice) - return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) - + if ac.alert_duration is not None: + if ac.alert_duration > 0x1FF: + raise PdmError("Alert duration in minutes cannot be more than %d" % 0x1ff) + elif ac.alert_duration < 0: + raise PdmError("Invalid alert duration value") + + if ac.alert_after_minutes is not None and ac.alert_after_minutes > 
4800: + raise PdmError("Alert cannot be set beyond 80 hours") + if ac.alert_after_minutes is not None and ac.alert_after_minutes < 0: + raise PdmError("Invalid value for alert_after_minutes") + + if ac.alert_after_reservoir is not None and ac.alert_after_reservoir > 50: + raise PdmError("Alert cannot be set for more than 50 units") + if ac.alert_after_reservoir is not None and ac.alert_after_reservoir < 0: + raise PdmError("Invalid value for alert_after_reservoir") + + b0 = ac.alert_index << 4 + if ac.activate: + b0 |= 0x08 + if ac.alert_after_reservoir is not None: + b0 |= 0x04 + if ac.trigger_auto_off: + b0 |= 0x02 + + b0 |= (ac.alert_duration >> 8) & 0x0001 + b1 = ac.alert_duration & 0x00ff + b2 = 0 + b3 = 0 + if ac.alert_after_reservoir is not None: + reservoir_limit = int(ac.alert_after_reservoir * 10) + b2 = reservoir_limit >> 8 + b3 = reservoir_limit & 0x00ff + if ac.alert_after_minutes is not None: + b2 = ac.alert_after_minutes >> 8 + b3 = ac.alert_after_minutes & 0x00ff + + cmd_body += bytes([b0, b1, b2, b3, ac.beep_repeat_type, ac.beep_type]) -def request_clear_generic_alert(): - cmd_body = _alert_configuration_message(PodAlertBit.TimerLimit, - activate=True, - trigger_auto_off=False, - duration_minutes=0, - alert_after_minutes=0, - beep_repeat_type=BeepPattern.Once, - beep_type=BeepType.NoSound) return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) -def request_set_initial_alerts(activation_date): - - minutes_past_activation = int(time.time() - activation_date) + 1 - minutes_to_72hours = (72*60) - minutes_past_activation - minutes_to_80hours = (80*60) - minutes_past_activation - - cmd_body = _alert_configuration_message(PodAlertBit.TimerLimit, - activate=True, - trigger_auto_off=False, - alert_after_minutes=minutes_to_72hours, - duration_minutes=7*60, - beep_repeat_type=BeepPattern.OnceEveryHour, - beep_type=BeepType.BipBeepFourTimes) - - cmd_body += _alert_configuration_message(PodAlertBit.EndOfService, - activate=True, - trigger_auto_off=False, - 
alert_after_minutes=minutes_to_80hours, - duration_minutes=15, - beep_repeat_type=BeepPattern.OnceEveryHour, - beep_type=BeepType.BipBeepFourTimes) - - cmd_body += _alert_configuration_message(PodAlertBit.AutoOff, - activate=False, - trigger_auto_off=False, - alert_after_minutes=0, - duration_minutes=0, - beep_repeat_type=BeepPattern.Once, - beep_type=BeepType.NoSound) - - return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body) def request_set_basal_schedule(schedule, hour, minute, second): halved_schedule = [] @@ -224,8 +172,8 @@ def request_purge_insulin(iu_to_purge): delivery_delay=1) -def request_bolus(iu_bolus): - return _bolus_message(pulse_count=int(iu_bolus / DECIMAL_0_05)) +def request_bolus(iu_bolus, pulse_interval): + return _bolus_message(pulse_count=int(iu_bolus / DECIMAL_0_05), pulse_speed=8*pulse_interval, delivery_delay=pulse_interval) def request_cancel_bolus(): @@ -304,11 +252,12 @@ def response_parse(response, pod): parse_status_response(response_body, pod) else: raise ProtocolError("Unknown response type %02X" % response_type) + pod.Save() def parse_information_response(response, pod): if response[0] == 0x01: - pass + pod.state_alerts = struct.unpack(">8H", response[3:]) elif response[0] == 0x02: pod.state_last_updated = time.time() pod.state_faulted = True @@ -417,58 +366,6 @@ def parse_version_response(response, pod): pod.radio_address = struct.unpack(">I", response[16:20])[0] -def _alert_configuration_message(alert_bit, activate, trigger_auto_off, duration_minutes, beep_repeat_type, beep_type, - alert_after_minutes=None, alert_after_reservoir=None, trigger_reservoir=False): - if alert_after_minutes is None: - if alert_after_reservoir is None: - raise PdmError("Either alert_after_minutes or alert_after_reservoir must be set") - elif not trigger_reservoir: - raise PdmError("Trigger insulin_reservoir must be True if alert_after_reservoir is to be set") - else: - if alert_after_reservoir is not None: - raise PdmError("Only one of 
alert_after_minutes or alert_after_reservoir must be set") - elif trigger_reservoir: - raise PdmError("Trigger insulin_reservoir must be False if alert_after_minutes is to be set") - - if duration_minutes > 0x1FF: - raise PdmError("Alert duration in minutes cannot be more than %d" % 0x1ff) - elif duration_minutes < 0: - raise PdmError("Invalid alert duration value") - - if alert_after_minutes is not None and alert_after_minutes > 4800: - raise PdmError("Alert cannot be set beyond 80 hours") - if alert_after_minutes is not None and alert_after_minutes < 0: - raise PdmError("Invalid value for alert_after_minutes") - - if alert_after_reservoir is not None and alert_after_reservoir > 50: - raise PdmError("Alert cannot be set for more than 50 units") - if alert_after_reservoir is not None and alert_after_reservoir < 0: - raise PdmError("Invalid value for alert_after_reservoir") - - b0 = alert_bit << 4 - if activate: - b0 |= 0x08 - if trigger_reservoir: - b0 |= 0x04 - if trigger_auto_off: - b0 |= 0x02 - - b0 |= (duration_minutes >> 8) & 0x0001 - b1 = duration_minutes & 0x00ff - - if alert_after_reservoir is not None: - reservoir_limit = int(alert_after_reservoir * 10) - b2 = reservoir_limit >> 8 - b3 = reservoir_limit & 0x00ff - elif alert_after_minutes is not None: - b2 = alert_after_minutes >> 8 - b3 = alert_after_minutes & 0x00ff - else: - raise PdmError("Incorrect alert configuration requested") - - return bytes([b0, b1, b2, b3, beep_repeat_type, beep_type]) - - def _bolus_message(pulse_count, pulse_speed=16, reminders=0, delivery_delay=2): commandBody = bytes([0x02]) @@ -505,4 +402,4 @@ def _cancel_activity_message(basal=False, bolus=False, temp_basal=False): cmd_body = bytes([c]) msg = PdmMessage(PdmRequest.CancelDelivery, cmd_body) - return msg + return msg \ No newline at end of file diff --git a/podcomm/protocol_common.py b/podcomm/protocol_common.py index 265fd02..129647e 100644 --- a/podcomm/protocol_common.py +++ b/podcomm/protocol_common.py @@ -1,4 +1,3 @@ 
-from typing import List
 from podcomm.exceptions import PdmError, ProtocolError
 from enum import IntEnum
diff --git a/podcomm/protocol_pdm.py b/podcomm/protocol_pdm.py
new file mode 100644
index 0000000..10d9c5a
--- /dev/null
+++ b/podcomm/protocol_pdm.py
@@ -0,0 +1,44 @@
+from podcomm.definitions import OMNIPY_LOGGER
+from podcomm.pod import POD_NONCE_SYNCWORD
+from podcomm.protocol import response_parse, PdmError, request_status
+import logging
+
+
+def update_status(pod, radio, update_type=0):
+    logger = logging.getLogger(OMNIPY_LOGGER)
+    try:
+        logger.info("Updating pod status, request type %d" % update_type)
+        pod.last_command = {"command": "STATUS", "type": update_type, "success": False}
+        request = request_status(update_type)
+        return _send_request(pod, radio, request)
+    except Exception:
+        raise
+    finally:
+        pod._savePod()
+
+
+def _send_request(pod, radio, request, nonce=None, double_take=False,
+                  expect_critical_follow_up=False):
+    logger = logging.getLogger(OMNIPY_LOGGER)
+    if nonce is not None:
+        request.set_nonce(nonce.getNext())
+        pod.data[POD_NONCE_SYNCWORD] = None
+
+    response, rssi = radio.send_message_get_message(request, double_take=double_take,
+                                                    expect_critical_follow_up=expect_critical_follow_up)
+    response_parse(response, pod)
+
+    if nonce is not None and pod.data[POD_NONCE_SYNCWORD] is not None:
+        logger.info("Nonce resync requested")
+        nonce.sync(pod.data[POD_NONCE_SYNCWORD], request.sequence)
+        request.set_nonce(nonce.getNext())
+        pod.data[POD_NONCE_SYNCWORD] = None
+        radio.message_sequence = request.sequence
+        response, rssi = radio.send_message_get_message(request, double_take=double_take,
+                                                        expect_critical_follow_up=expect_critical_follow_up)
+        response_parse(response, pod)
+        if pod.nonce_syncword is not None:
+            nonce.get_nonce().reset()
+            raise PdmError("Nonce sync failed")
+
+    return rssi
\ No newline at end of file
diff --git a/podcomm/protocol_radio.py b/podcomm/protocol_radio.py
index b2ffd20..87de409 100644
--- 
a/podcomm/protocol_radio.py +++ b/podcomm/protocol_radio.py @@ -1,9 +1,9 @@ -from .exceptions import PacketRadioError, OmnipyTimeoutError +from .exceptions import PacketRadioError, OmnipyTimeoutError, RecoverableProtocolError, StatusUpdateRequired from podcomm.packet_radio import TxPower from podcomm.protocol_common import * from .pr_rileylink import RileyLink from .definitions import * -from threading import Thread, Event +from threading import Thread, Event, RLock import binascii import time import subprocess @@ -12,12 +12,29 @@ def _ack_data(address1, address2, sequence): return RadioPacket(address1, RadioPacketType.ACK, sequence, struct.pack(">I", address2)) + +class MessageExchange: + def __init__(self): + self.unique_packets = 0 + self.repeated_sends = 0 + self.receive_timeouts = 0 + self.repeated_receives = 0 + self.protocol_errors = 0 + self.bad_packets = 0 + self.radio_errors = 0 + self.successful = False + self.queued = 0 + self.started = 0 + self.ended = 0 + + class PdmRadio: def __init__(self, radio_address, msg_sequence=0, pkt_sequence=0, packet_radio=None): + self.rssi_total = 0 + self.rssi_count = 0 self.radio_address = radio_address self.message_sequence = msg_sequence self.packet_sequence = pkt_sequence - self.last_received_packet = None self.logger = getLogger() self.packet_logger = get_packet_logger() @@ -27,9 +44,8 @@ def __init__(self, radio_address, msg_sequence=0, pkt_sequence=0, packet_radio=N self.packet_radio = packet_radio self.last_packet_received = None - self.last_packet_timestamp = None - self.radio_ready = Event() - self.radio_busy = False + self.last_sync_timestamp = None + self.request_arrived = Event() self.response_received = Event() self.request_shutdown = Event() @@ -40,81 +56,101 @@ def __init__(self, radio_address, msg_sequence=0, pkt_sequence=0, packet_radio=N self.pod_message = None self.response_exception = None self.radio_thread = None + + self.pdm_message = None + self.pdm_message_address = None + 
self.ack_address_override = None + self.debug_cut_last_ack = False + self.debug_cut_msg_after = None + self.debug_cut_message_seq = 0 + self.debug_cut_packet_seq = 0 + + self.stats = [] + self.current_exchange = MessageExchange() + self.radio_lock = RLock() self.start() def start(self): - self.radio_thread = Thread(target=self._radio_loop) - self.radio_thread.setDaemon(True) - self.radio_thread.start() + with self.radio_lock: + self.radio_thread = Thread(target=self._radio_loop) + self.radio_thread.setDaemon(True) + self._radio_init() + self.radio_thread.start() def stop(self): - self.radio_ready.wait() - self.radio_ready.clear() - self.request_shutdown.set() - self.request_arrived.set() - self.radio_thread.join() - self.request_shutdown.clear() + with self.radio_lock: + self.request_shutdown.set() + self.request_arrived.set() + self.radio_thread.join() + self.radio_thread = None + self.request_shutdown.clear() def send_message_get_message(self, message, message_address = None, ack_address_override=None, tx_power=None, double_take=False, expect_critical_follow_up=False): - if not self.radio_ready.wait(30): - if self.radio_busy: - raise PacketRadioError("Radio is busy") + queued = time.time() + with self.radio_lock: + if self.radio_thread is None: + raise PacketRadioError("Radio is stopped") + + self.pdm_message = message + if message_address is None: + self.pdm_message_address = self.radio_address else: - raise PacketRadioError("Radio is not ready") + self.pdm_message_address = message_address + self.ack_address_override = ack_address_override + self.pod_message = None + self.double_take = double_take + self.tx_power = tx_power + self.expect_critical_follow_up = expect_critical_follow_up - self.radio_ready.clear() + self.request_arrived.set() - self.pdm_message = message - if message_address is None: - self.pdm_message_address = self.radio_address - else: - self.pdm_message_address = message_address - self.ack_address_override = ack_address_override - 
self.pod_message = None - self.double_take = double_take - self.tx_power = tx_power - self.expect_critical_follow_up = expect_critical_follow_up + self.response_received.wait() + self.response_received.clear() + self.current_exchange.queued = queued - self.request_arrived.set() + if self.pod_message is None: + self.current_exchange.successful = False + self.stats.append(self.current_exchange) + raise self.response_exception - self.response_received.wait() - self.response_received.clear() - if self.pod_message is None: - raise self.response_exception - return self.pod_message + self.current_exchange.successful = True + self.stats.append(self.current_exchange) + return self.pod_message def get_packet(self, timeout=30000): - received = self.packet_radio.get_packet(timeout=timeout) - p, rssi = self._get_packet(received) - return p + with self.radio_lock: + received = self.packet_radio.get_packet(timeout=timeout) + p, rssi = self._get_packet(received) + return p def disconnect(self): + with self.radio_lock: + self._disconnect() + + def _disconnect(self): try: self.packet_radio.disconnect(ignore_errors=True) except Exception: self.logger.exception("Error while disconnecting") def _radio_loop(self): - while not self._radio_init(): - self.logger.warning("Failed to initialize radio, retrying") - time.sleep(5) - - self.radio_ready.set() while True: - if not self.request_arrived.wait(timeout=5.0): - self.disconnect() + if not self.request_arrived.wait(timeout=900.0): + self._disconnect() self.request_arrived.wait() self.request_arrived.clear() if self.request_shutdown.wait(0): - self.disconnect() + self._disconnect() break - self.radio_busy = True + self.current_exchange = MessageExchange() + self.current_exchange.started = time.time() + try: self.pod_message = self._send_and_get(self.pdm_message, self.pdm_message_address, self.ack_address_override, @@ -127,19 +163,22 @@ def _radio_loop(self): if self.response_exception is None: ack_packet = 
self._final_ack(self.ack_address_override, self.packet_sequence) - self.packet_sequence = (self.packet_sequence + 1) % 32 + self.current_exchange.ended = time.time() self.response_received.set() - try: - self._send_packet(ack_packet) - except Exception as e: - self.logger.exception("Error during ending conversation, ignored.") + if not self.debug_cut_last_ack: + try: + self._send_packet(ack_packet) + except Exception: + self.logger.exception("Error during ending conversation, ignored.") + else: + self.message_sequence = (self.message_sequence - self.debug_cut_message_seq) % 16 + self.packet_sequence = (self.packet_sequence - self.debug_cut_packet_seq) % 16 + self.last_packet_received = None else: + self.current_exchange.ended = time.time() self.response_received.set() - self.radio_ready.set() - self.radio_busy = False - def _interim_ack(self, ack_address_override, sequence): if ack_address_override is None: return _ack_data(self.radio_address, self.radio_address, sequence) @@ -156,30 +195,31 @@ def _radio_init(self, retries=1): retry = 0 while retry < retries: try: - self.disconnect() + self.packet_radio.disconnect() self.packet_radio.connect(force_initialize=True) return True except: self.logger.exception("Error during radio initialization") + self._kill_btle_subprocess() time.sleep(2) retry += 1 return False def _kill_btle_subprocess(self): try: - p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) - out, err = p.communicate() - for line in out.splitlines(): - if "bluepy-helper" in line: - pid = int(line.split(None, 1)[0]) - os.kill(pid, 9) - break + os.system("sudo killall -9 bluepy-helper") + # p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) + # out, err = p.communicate() + # for line in out.splitlines(): + # if "bluepy-helper" in line: + # pid = int(line.split(None, 1)[0]) + # os.kill(pid, 9) + # break except: self.logger.warning("Failed to kill bluepy-helper") def _reset_sequences(self): self.packet_sequence = 0 - self.message_sequence = 0 
def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=None, tx_power=None, double_take=False, expect_critical_follow_up=False): @@ -194,7 +234,6 @@ def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=N try: if tx_power is not None: self.packet_radio.set_tx_power(tx_power) - self._awaken() except PacketRadioError: if not self._radio_init(3): raise @@ -202,6 +241,8 @@ def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=N received = None packet_count = len(packets) + self.current_exchange.unique_packets = packet_count * 2 + for part in range(0, packet_count): packet = packets[part] repeat_count = -1 @@ -220,10 +261,14 @@ def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=N expected_type = RadioPacketType.ACK try: - received = self._exchange_packets(packet.with_sequence(self.packet_sequence), - expected_type=expected_type, - timeout=timeout) - break + if self.debug_cut_msg_after is None or self.debug_cut_msg_after != part: + received = self._exchange_packets(packet.with_sequence(self.packet_sequence), + expected_type=expected_type, + force_extend_preamble=part == 0, + timeout=timeout) + break + else: + raise Exception("debug cut here") except OmnipyTimeoutError: self.logger.debug("Trying to recover from timeout error") if part == 0: @@ -231,25 +276,15 @@ def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=N timeout = 15 continue elif repeat_count == 1: - self._reset_sequences() timeout = 10 time.sleep(2) continue elif repeat_count == 2: - self._reset_sequences() self._radio_init() timeout = 15 continue else: self.logger.debug("Failed recovery") - if packet_count == 1: - self.logger.debug("Calming pod down in case of reception problem on our end") - ack_packet = self._final_ack(self.ack_address_override, 1) - try: - self.packet_radio.set_tx_power(TxPower.Highest) - self._send_packet(ack_packet) - except Exception as e: - 
self.logger.exception("Ignored.") self._reset_sequences() raise elif part < packet_count - 1: @@ -266,54 +301,56 @@ def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=N raise except PacketRadioError: self.logger.debug("Trying to recover from radio error") + self.current_exchange.radio_errors += 1 if part == 0: if repeat_count < 2: self._radio_init() continue elif repeat_count < 4: - self.disconnect() + self._disconnect() self._kill_btle_subprocess() timeout = 10 time.sleep(2) continue else: self.logger.debug("Failed recovery") - self._reset_sequences() raise elif part < packet_count - 1: if repeat_count < 6: - self.disconnect() + self._disconnect() self._kill_btle_subprocess() timeout = 10 time.sleep(2) continue else: self.logger.debug("Failed recovery") - self._reset_sequences() raise else: if repeat_count < 10: - self.disconnect() + self._disconnect() self._kill_btle_subprocess() timeout = 10 time.sleep(2) continue else: self.logger.debug("Failed recovery") - self._reset_sequences() raise + except RecoverableProtocolError as rpe: + self.logger.debug("Trying to recover from protocol error") + self.packet_sequence = (rpe.packet.sequence + 1) % 32 + if expected_type == RadioPacketType.POD and rpe.packet.type == RadioPacketType.ACK: + raise StatusUpdateRequired() + continue except ProtocolError: - if part == 0 and repeat_count == 0: - self.logger.debug("Trying to recover from protocol error") - continue - else: - raise + self.logger.debug("Trying to recover from protocol error") + self.packet_sequence = (self.packet_sequence + 2) % 32 + continue part += 1 self.packet_sequence = (received.sequence + 1) % 32 - self.packet_logger.info("SENT MSG %s" % pdm_message) + part_count = 0 if received.type == RadioPacketType.POD: part_count = 1 self.logger.debug("Received POD message part %d." 
% part_count) @@ -327,39 +364,67 @@ def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=N self.packet_logger.info("RCVD MSG %s" % pod_response) self.logger.debug("Send and receive completed.") self.message_sequence = (pod_response.sequence + 1) % 16 + self.packet_sequence = (received.sequence + 1) % 32 return pod_response + def _send_get(self, send_data, force_ext_preamble=False): + if force_ext_preamble or self.last_sync_timestamp is None or time.time() - self.last_sync_timestamp > 5: + received = self._send_get_with_ext(send_data) + else: + received = self._send_get_no_ext(send_data) + + if received is None: + self.last_sync_timestamp = None + else: + self.last_sync_timestamp = time.time() + + return received + + def _send_get_with_ext(self, send_data): + return self.packet_radio.send_and_receive_packet(send_data, 0, 0, 150, 10, 150) + + def _send_get_no_ext(self, send_data): + return self.packet_radio.send_and_receive_packet(send_data, 5, 15, 145, 2, 5) - def _exchange_packets(self, packet_to_send, expected_type, timeout=10): + def _exchange_packets(self, packet_to_send, expected_type, force_extend_preamble=False, timeout=30): start_time = None + first = True while start_time is None or time.time() - start_time < timeout: - received = self.packet_radio.send_and_receive_packet(packet_to_send.get_data(), 0, 0, 100, 1, 130) + if first: + first = False + else: + self.current_exchange.repeated_sends += 1 + + received = self._send_get(packet_to_send.get_data(), force_extend_preamble) + if start_time is None: start_time = time.time() self.packet_logger.info("SEND PKT %s" % packet_to_send) if received is None: + self.current_exchange.receive_timeouts += 1 self.packet_logger.debug("RECV PKT None") self.packet_radio.tx_up() continue p, rssi = self._get_packet(received) if p is None: + self.current_exchange.bad_packets += 1 self.packet_logger.debug("RECV PKT BAD DATA: %s" % received.hex()) self.packet_radio.tx_down() continue 
self.packet_logger.info("RECV PKT %s" % p) if p.address != self.radio_address: + self.current_exchange.bad_packets += 1 self.packet_logger.debug("RECV PKT ADDR MISMATCH") self.packet_radio.tx_down() continue - self.last_packet_timestamp = time.time() - if self.last_packet_received is not None and \ p.sequence == self.last_packet_received.sequence and \ p.type == self.last_packet_received.type: + self.current_exchange.repeated_receives += 1 self.packet_logger.debug("RECV PKT previous") self.packet_radio.tx_up() continue @@ -369,48 +434,55 @@ def _exchange_packets(self, packet_to_send, expected_type, timeout=10): if expected_type is not None and p.type != expected_type: self.packet_logger.debug("RECV PKT unexpected type %s" % p) - raise ProtocolError("Unexpected packet type received") + self.current_exchange.protocol_errors += 1 + raise RecoverableProtocolError("Unexpected packet type", p) if p.sequence != (packet_to_send.sequence + 1) % 32: self.packet_sequence = (p.sequence + 1) % 32 self.packet_logger.debug("RECV PKT unexpected sequence %s" % p) self.last_packet_received = p - raise ProtocolError("Incorrect packet sequence received") + self.current_exchange.protocol_errors += 1 + raise RecoverableProtocolError("Incorrect packet sequence", p) return p + raise OmnipyTimeoutError("Exceeded timeout while send and receive") - def _send_packet(self, packet_to_send, timeout=25): + def _send_packet(self, packet_to_send, timeout=25, allow_premature_exit_after=None): start_time = None + self.current_exchange.unique_packets += 1 while start_time is None or time.time() - start_time < timeout: try: self.packet_logger.info("SEND PKT %s" % packet_to_send) - - received = self.packet_radio.send_and_receive_packet(packet_to_send.get_data(), 5, 55, 300, 2, 40) + received = self.packet_radio.send_and_receive_packet(packet_to_send.get_data(), 5, 15, 145, 0, 5) if start_time is None: start_time = time.time() - # if self.request_arrived.wait(timeout=0): - # 
self.logger.debug("Prematurely exiting final phase to process next request") - # return - if received is None: - received = self.packet_radio.get_packet(1.0) - if received is None: - self.packet_logger.debug("Silence") + if allow_premature_exit_after is not None and \ + time.time() - start_time >= allow_premature_exit_after: + if self.request_arrived.wait(timeout=0): + self.logger.debug("Prematurely exiting final phase to process next request") + self.packet_sequence = (self.packet_sequence + 1) % 32 break + if received is None: + self.packet_logger.debug("Silence") + self.packet_sequence = (self.packet_sequence + 1) % 32 + break p, rssi = self._get_packet(received) if p is None: + self.current_exchange.bad_packets += 1 self.packet_logger.debug("RECV PKT bad %s" % received.hex()) self.packet_radio.tx_down() continue if p.address != self.radio_address: + self.current_exchange.bad_packets += 1 self.packet_logger.debug("RECV PKT ADDR MISMATCH") self.packet_radio.tx_down() continue - self.last_packet_timestamp = time.time() if self.last_packet_received is not None: + self.current_exchange.repeated_receives += 1 if p.type == self.last_packet_received.type and p.sequence == self.last_packet_received.sequence: self.packet_logger.debug("RECV PKT previous") self.packet_radio.tx_up() @@ -418,13 +490,16 @@ def _send_packet(self, packet_to_send, timeout=25): self.packet_logger.info("RECV PKT %s" % p) self.packet_logger.debug("RECEIVED unexpected packet: %s" % p) + self.current_exchange.protocol_errors = 1 self.last_packet_received = p self.packet_sequence = (p.sequence + 1) % 32 packet_to_send.with_sequence(self.packet_sequence) + start_time = time.time() continue except PacketRadioError: + self.current_exchange.radio_errors += 1 self.logger.exception("Radio error during send and receive, retrying") if not self._radio_init(3): raise @@ -435,12 +510,21 @@ def _send_packet(self, packet_to_send, timeout=25): def _get_packet(self, data): rssi = None if data is not None and 
len(data) > 2: - rssi = data[0] + rssi = (255 - data[0]) / -2 - 73 + getLogger().debug("RSSI: %d" % (rssi)) + self.rssi_total += rssi + self.rssi_count += 1 try: return RadioPacket.parse(data[2:]), rssi except: getLogger().exception("RECEIVED DATA: %s RSSI: %d" % (binascii.hexlify(data[2:]), rssi)) return None, rssi - def _awaken(self): - self.packet_radio.send_packet(bytes(), 0, 0, 250) + def start_rssi_averaging(self): + self.rssi_total = 0 + self.rssi_count = 0 + + def get_rssi_average(self): + if self.rssi_count > 0: + return self.rssi_total / self.rssi_count + return 0 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..945fbfa --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +crypto>=1.4.1 +Flask>=1.1.2 +simplejson>=3.17.2 +RPi.GPIO>=0.7.0 +google-cloud-pubsub>=1.7.0 +requests>=2.24.0 +pymongo>=3.11.0 +pytz>=2020.1 +git+https://github.com/winemug/hbmqtt.git +git+https://github.com/winemug/bluepy.git diff --git a/restapi.py b/restapi.py old mode 100755 new mode 100644 index fedccfe..6bc98eb --- a/restapi.py +++ b/restapi.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/home/pi/v/bin/python3 from threading import Thread import signal import base64 @@ -10,23 +10,28 @@ from flask import Flask, request, send_from_directory from datetime import datetime import time -from podcomm.crc import crc8 from podcomm.pdm import Pdm, PdmLock from podcomm.pod import Pod from podcomm.pr_rileylink import RileyLink from podcomm.definitions import * -from podcomm.protocol_common import RadioPacket +from logging import FileHandler +from batt_check import SpiBatteryVoltageChecker + +g_oldest_diff = None +g_time_diffs = [] g_key = None g_pod = None g_pdm = None g_deny = False g_tokens = [] g_token_lock = Lock() +g_battery_checker = SpiBatteryVoltageChecker() app = Flask(__name__, static_url_path="/") configureLogging() -logger = getLogger() +logger = getLogger(with_console=True) +get_packet_logger(with_console=True) class RestApiException(Exception): 
@@ -36,17 +41,31 @@ def __init__(self, msg="Unknown"): def __str__(self): return self.error_message +def _set_pod(pod): + global g_pod + global g_pdm + + g_pod = pod + + g_pod.path = DATA_PATH + POD_FILE + POD_FILE_SUFFIX + g_pod.path_db = DATA_PATH + POD_FILE + POD_DB_SUFFIX + g_pod.Save() -def get_pod(): + if g_pdm is not None: + g_pdm.stop_radio() + g_pdm = None + + +def _get_pod(): global g_pod try: if g_pod is None: - if os.path.exists(POD_FILE + POD_FILE_SUFFIX): - g_pod = Pod.Load(POD_FILE + POD_FILE_SUFFIX, POD_FILE + POD_LOG_SUFFIX) + if os.path.exists(DATA_PATH + POD_FILE + POD_FILE_SUFFIX): + g_pod = Pod.Load(DATA_PATH + POD_FILE + POD_FILE_SUFFIX, DATA_PATH + POD_FILE + POD_DB_SUFFIX) else: g_pod = Pod() - g_pod.path = POD_FILE + POD_FILE_SUFFIX - g_pod.log_file_path = POD_FILE + POD_LOG_SUFFIX + g_pod.path = DATA_PATH + POD_FILE + POD_FILE_SUFFIX + g_pod.path_db = DATA_PATH + POD_FILE + POD_DB_SUFFIX g_pod.Save() return g_pod except: @@ -54,65 +73,92 @@ def get_pod(): return None -def get_pdm(): +def _get_pdm(): global g_pdm try: if g_pdm is None: - g_pdm = Pdm(get_pod()) + g_pdm = Pdm(_get_pod()) return g_pdm except: logger.exception("Error while creating pdm instance") return None -def archive_pod(): +def _flush_handlers(logger): + for handler in logger.handlers: + # if isinstance(handler, MemoryHandler): + # handler.flush() + if isinstance(handler, FileHandler): + handler.flush() + handler.close() + +def _archive_pod(): global g_pod global g_pdm try: g_pod = None g_pdm = None + archive_name = None archive_suffix = datetime.utcnow().strftime("_%Y%m%d_%H%M%S") - if os.path.isfile(POD_FILE + POD_FILE_SUFFIX): - os.rename(POD_FILE + POD_FILE_SUFFIX, POD_FILE + archive_suffix + POD_FILE_SUFFIX) - if os.path.isfile(POD_FILE + POD_LOG_SUFFIX): - os.rename(POD_FILE + POD_LOG_SUFFIX, POD_FILE + archive_suffix + POD_LOG_SUFFIX) + + if os.path.isfile(DATA_PATH + POD_FILE + POD_FILE_SUFFIX): + archive_name = DATA_PATH + POD_FILE + archive_suffix + 
POD_FILE_SUFFIX + os.rename(DATA_PATH + POD_FILE + POD_FILE_SUFFIX, + archive_name) + if os.path.isfile(DATA_PATH + POD_FILE + POD_DB_SUFFIX): + os.rename(DATA_PATH + POD_FILE + POD_DB_SUFFIX, + DATA_PATH + POD_FILE + archive_suffix + POD_DB_SUFFIX) + + _flush_handlers(getLogger()) + _flush_handlers(get_packet_logger()) + + if os.path.isfile(DATA_PATH + OMNIPY_PACKET_LOGFILE + LOGFILE_SUFFIX): + os.rename(DATA_PATH + OMNIPY_PACKET_LOGFILE + LOGFILE_SUFFIX, + DATA_PATH + OMNIPY_PACKET_LOGFILE + archive_suffix + LOGFILE_SUFFIX) + + if os.path.isfile(DATA_PATH + OMNIPY_LOGFILE + LOGFILE_SUFFIX): + os.rename(DATA_PATH + OMNIPY_LOGFILE + LOGFILE_SUFFIX, + DATA_PATH + OMNIPY_LOGFILE + archive_suffix + LOGFILE_SUFFIX) + + return archive_name except: logger.exception("Error while archiving existing pod") -def get_next_pod_address(): +def _get_battery_level(): + global g_battery_checker + return g_battery_checker.get_measurement() + + +def _get_next_pod_address(): try: - if os.path.isfile(LAST_ACTIVATED_FILE): - with open(LAST_ACTIVATED_FILE, "rb") as lastfile: - ab = lastfile.read(4) - addr = (ab[0] << 24) | (ab[1] << 16) | (ab[2] << 8) | ab[3] + try: + with open(DATA_PATH + LAST_ACTIVATED_FILE, "r") as lastfile: + addr = int(lastfile.readline(), 16) blast = (addr & 0x0000000f) + 1 addr = (addr & 0xfffffff0) | (blast & 0x0000000f) - else: + except: mac = get_mac() - b0 = (mac >> 20) & 0xff + b0 = 0x34 b1 = (mac >> 12) & 0xff b2 = (mac >> 4) & 0xff b3 = (mac << 4) & 0xf0 addr = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3 - + addr = addr | 0x00000008 return addr except: logger.exception("Error while getting next radio address") -def save_activated_pod_address(addr): +def _save_activated_pod_address(addr): try: - with open(LAST_ACTIVATED_FILE, "w+b") as lastfile: - b0 = (addr >> 24) & 0xff - b1 = (addr >> 16) & 0xff - b2 = (addr >> 8) & 0xff - b3 = addr & 0xf0 - lastfile.write(bytes([b0, b1, b2, b3])) + with open(DATA_PATH + LAST_ACTIVATED_FILE, "w") as lastfile: + 
lastfile.write(hex(addr)) except: logger.exception("Error while storing activated radio address") -def create_response(success, response, pod_status=None): + +def _create_response(success, response, pod_status=None): if pod_status is None: pod_status = {} @@ -128,11 +174,13 @@ def create_response(success, response, pod_status=None): "response": response, "status": pod_status, "datetime": time.time(), - "api": {"version_major": API_VERSION_MAJOR, "version_minor": API_VERSION_MINOR} + "api": {"version_major": API_VERSION_MAJOR, "version_minor": API_VERSION_MINOR, + "version_revision": API_VERSION_REVISION, "version_build": API_VERSION_BUILD}, + "battery_level": _get_battery_level() }, indent=4, sort_keys=True) -def verify_auth(request_obj): +def _verify_auth(request_obj): global g_deny try: if g_deny: @@ -162,60 +210,54 @@ def verify_auth(request_obj): raise -@app.route("/") -def main_page(): - try: - return app.send_static_file("omnipy.html") - except: - logger.exception("Error while serving root file") - - -@app.route('/content/') -def send_content(path): - try: - return send_from_directory("static", path) - except: - logger.exception("Error while serving static file from %s" % path) +def _adjust_time(adjustment): + logger.info("Adjusting local time by %d ms" % adjustment) + pdm = _get_pdm() + if pdm is not None: + pdm.set_time_adjustment(adjustment / 1000) def _api_result(result_lambda, generic_err_message): + global g_time_diffs, g_oldest_diff try: if g_deny: raise RestApiException("Pdm is shutting down") - return create_response(True, - response=result_lambda(), pod_status=get_pod()) + # if request.args.get('req_t') is not None: + # req_time = int(request.args.get('req_t')) + # local_time = int(time.time() * 1000) + # difference_ms = (req_time - local_time) + # if g_oldest_diff is None: + # g_oldest_diff = local_time + # + # if g_oldest_diff - local_time > 300: + # g_time_diffs = [difference_ms] + # g_oldest_diff = local_time + # else: + # 
g_time_diffs.append(difference_ms) + # + # if len(g_time_diffs) > 3: + # diff_avg = sum(g_time_diffs) / len(g_time_diffs) + # g_time_diffs = [] + # + # if diff_avg > 30000 or diff_avg < -30000: + # _adjust_time(diff_avg) + + return _create_response(True, + response=result_lambda(), pod_status=_get_pod()) except RestApiException as rae: - return create_response(False, response=rae, pod_status=get_pod()) + return _create_response(False, response=rae, pod_status=_get_pod()) except Exception as e: logger.exception(generic_err_message) - return create_response(False, response=e, pod_status=get_pod()) + return _create_response(False, response=e, pod_status=_get_pod()) -def ping(): - return {"pong": None} - - -def create_token(): - token = bytes(os.urandom(16)) - with g_token_lock: - g_tokens.append(token) - return {"token": base64.b64encode(token)} - - -def check_password(): - verify_auth(request) - return None - def _get_pdm_address(timeout): - pdm = get_pdm() - packet = None with PdmLock(): - radio = get_pdm().get_radio() - radio.stop() - try: + radio = _get_pdm().get_radio() + radio.stop() packet = radio.get_packet(timeout) finally: radio.disconnect() @@ -226,8 +268,30 @@ def _get_pdm_address(timeout): return packet.address + +def archive_pod(): + _verify_auth(request) + pod = Pod() + _archive_pod() + _set_pod(pod) + +def ping(): + return {"pong": None} + + +def create_token(): + token = bytes(os.urandom(16)) + with g_token_lock: + g_tokens.append(token) + return {"token": base64.b64encode(token)} + + +def check_password(): + _verify_auth(request) + + def get_pdm_address(): - verify_auth(request) + _verify_auth(request) timeout = 30000 if request.args.get('timeout') is not None: @@ -239,8 +303,9 @@ def get_pdm_address(): return {"radio_address": address, "radio_address_hex": "%8X" % address} + def new_pod(): - verify_auth(request) + _verify_auth(request) pod = Pod() @@ -256,25 +321,38 @@ def new_pod(): if pod.radio_address == 0: pod.radio_address = 
_get_pdm_address(45000) - archive_pod() - pod.Save(POD_FILE + POD_FILE_SUFFIX) + _archive_pod() + _set_pod(pod) -def activate_pod(): - verify_auth(request) - pod = Pod() - archive_pod() - pod.Save(POD_FILE + POD_FILE_SUFFIX) +def pair_pod(): + _verify_auth(request) + + pod = _get_pod() + if pod.state_progress >= PodProgress.Running: + pod = Pod() + _archive_pod() + _set_pod(pod) - pdm = get_pdm() + pdm = _get_pdm() + + req_address = _get_next_pod_address() + utc_offset = int(request.args.get('utc')) + pdm.pair_pod(req_address, utc_offset=utc_offset) + _save_activated_pod_address(req_address) + + +def activate_pod(): + _verify_auth(request) + + pdm = _get_pdm() + pdm.activate_pod() - pdm.activate_pod(get_next_pod_address()) - save_activated_pod_address(pod.radio_address) def start_pod(): - verify_auth(request) + _verify_auth(request) - pdm = get_pdm() + pdm = _get_pdm() schedule=[] @@ -282,11 +360,8 @@ def start_pod(): rate = Decimal(request.args.get("h"+str(i))) schedule.append(rate) - hours = int(request.args.get("hours")) - minutes = int(request.args.get("minutes")) - seconds = int(request.args.get("seconds")) + pdm.inject_and_start(schedule) - pdm.inject_and_start(schedule, hours, minutes, seconds) def _int_parameter(obj, parameter): if request.args.get(parameter) is not None: @@ -294,6 +369,7 @@ def _int_parameter(obj, parameter): return True return False + def _float_parameter(obj, parameter): if request.args.get(parameter) is not None: obj.__dict__[parameter] = float(request.args.get(parameter)) @@ -313,9 +389,9 @@ def _bool_parameter(obj, parameter): def set_pod_parameters(): - verify_auth(request) + _verify_auth(request) - pod = get_pod() + pod = _get_pod() try: reset_nonce = False if _int_parameter(pod, "id_lot"): @@ -348,57 +424,71 @@ def set_pod_parameters(): def get_rl_info(): - verify_auth(request) + _verify_auth(request) r = RileyLink() return r.get_info() + def get_status(): - verify_auth(request) + _verify_auth(request) t = request.args.get('type') 
if t is not None: req_type = int(t) else: req_type = 0 - pdm = get_pdm() - pdm.update_status(req_type) + pdm = _get_pdm() + id = pdm.update_status(req_type) + + return {"row_id":id} + def deactivate_pod(): - verify_auth(request) - pdm = get_pdm() - pdm.deactivate_pod() - archive_pod() + _verify_auth(request) + pdm = _get_pdm() + id = pdm.deactivate_pod() + _archive_pod() + return {"row_id":id} + def bolus(): - verify_auth(request) + _verify_auth(request) - pdm = get_pdm() + pdm = _get_pdm() amount = Decimal(request.args.get('amount')) - pdm.bolus(amount) + id = pdm.bolus(amount) + return {"row_id":id} + def cancel_bolus(): - verify_auth(request) + _verify_auth(request) + + pdm = _get_pdm() + id = pdm.cancel_bolus() + return {"row_id":id} - pdm = get_pdm() - pdm.cancel_bolus() def set_temp_basal(): - verify_auth(request) + _verify_auth(request) - pdm = get_pdm() + pdm = _get_pdm() amount = Decimal(request.args.get('amount')) hours = Decimal(request.args.get('hours')) - pdm.set_temp_basal(amount, hours, False) + id = pdm.set_temp_basal(amount, hours, False) + return {"row_id":id} + def cancel_temp_basal(): - verify_auth(request) + _verify_auth(request) + + pdm = _get_pdm() + id = pdm.cancel_temp_basal() + return {"row_id":id} - pdm = get_pdm() - pdm.cancel_temp_basal() def set_basal_schedule(): - verify_auth(request) - pdm = get_pdm() + _verify_auth(request) + pdm = _get_pdm() schedule=[] @@ -409,43 +499,114 @@ def set_basal_schedule(): utc_offset = int(request.args.get("utc")) pdm.pod.var_utc_offset = utc_offset - pdm.set_basal_schedule(schedule) + id = pdm.set_basal_schedule(schedule) + return {"row_id":id} + def is_pdm_busy(): - pdm = get_pdm() + pdm = _get_pdm() return {"busy": pdm.is_busy()} + def acknowledge_alerts(): - verify_auth(request) + _verify_auth(request) mask = Decimal(request.args.get('alertmask')) - pdm = get_pdm() - pdm.acknowledge_alerts(mask) + pdm = _get_pdm() + id = pdm.acknowledge_alerts(mask) + return {"row_id":id} + + +def silence_alarms(): 
+ _verify_auth(request) + + pdm = _get_pdm() + id = pdm.hf_silence_will_fall() + return {"row_id":id} def shutdown(): global g_deny - verify_auth(request) + _verify_auth(request) g_deny = True - pdm = get_pdm() + pdm = _get_pdm() while pdm.is_busy(): time.sleep(1) os.system("sudo shutdown -h") return {"shutdown": time.time()} + def restart(): global g_deny - verify_auth(request) + _verify_auth(request) g_deny = True - pdm = get_pdm() + pdm = _get_pdm() while pdm.is_busy(): time.sleep(1) os.system("sudo shutdown -r") return {"restart": time.time()} + +def update_omnipy(): + global g_deny + _verify_auth(request) + + g_deny = True + pdm = _get_pdm() + while pdm.is_busy(): + time.sleep(1) + os.system("/bin/bash /home/pi/omnipy/scripts/update.sh") + return {"update started": time.time()} + + +def update_wlan(): + global g_deny + _verify_auth(request) + + ssid = str(request.args.get('ssid')) + pw = str(request.args.get('pw')) + + g_deny = True + pdm = _get_pdm() + while pdm.is_busy(): + time.sleep(1) + os.system('/bin/bash /home/pi/omnipy/scripts/pi-setwifi.sh "%s" "%s"' % (ssid, pw)) + return {"update started": time.time()} + + +def update_password(): + global g_key + + _verify_auth(request) + + iv = base64.b64decode(request.args.get("i")) + pw_enc = base64.b64decode(request.args.get('pw')) + + cipher = AES.new(g_key, AES.MODE_CBC, iv) + new_key = cipher.decrypt(pw_enc) + + with open(DATA_PATH + KEY_FILE, "wb") as key_file: + key_file.write(new_key) + g_key = new_key + + +@app.route("/") +def main_page(): + try: + return app.send_static_file("omnipy.html") + except: + logger.exception("Error while serving root file") + +@app.route('/content/') +def send_content(path): + try: + return send_from_directory("static", path) + except: + logger.exception("Error while serving static file from %s" % path) + @app.route(REST_URL_PING) def a00(): return _api_result(lambda: ping(), "Failure while pinging") @@ -514,9 +675,13 @@ def a15(): def a16(): return _api_result(lambda: 
restart(), "Failure while executing reboot") +@app.route(REST_URL_PAIR_POD) +def a165(): + return _api_result(lambda: pair_pod(), "Failure while activating the pod") + @app.route(REST_URL_ACTIVATE_POD) def a17(): - return _api_result(lambda: activate_pod(), "Failure while activating a new pod") + return _api_result(lambda: activate_pod(), "Failure while activating the pod") @app.route(REST_URL_START_POD) def a18(): @@ -526,42 +691,69 @@ def a18(): def a19(): return _api_result(lambda: set_basal_schedule(), "Failure while setting a basal schedule") -def run_flask(): +@app.route(REST_URL_ARCHIVE_POD) +def a20(): + return _api_result(lambda: archive_pod(), "Failure while archiving pod") + +@app.route(REST_URL_OMNIPY_UPDATE) +def a21(): + return _api_result(lambda: update_omnipy(), "Failure while executing software update") + +@app.route(REST_URL_OMNIPY_WIFI) +def a22(): + return _api_result(lambda: update_wlan(), "Failure while updating wifi parameters") + +@app.route(REST_URL_OMNIPY_CHANGE_PASSWORD) +def a23(): + return _api_result(lambda: update_password(), "Failure while changing omnipy password") + +@app.route(REST_URL_SILENCE_ALARMS) +def a24(): + return _api_result(lambda: silence_alarms(), "Failure while silencing") + +def _run_flask(): try: app.run(host='0.0.0.0', port=4444, debug=True, use_reloader=False) except: logger.exception("Error while running rest api, exiting") -def exit_with_grace(): + +def _exit_with_grace(a, b): try: global g_deny g_deny = True - pdm = get_pdm() + pdm = _get_pdm() while pdm.is_busy(): time.sleep(5) + _flush_handlers(getLogger()) + _flush_handlers(get_packet_logger()) except: logger.exception("error during graceful shutdown") exit(0) + if __name__ == '__main__': logger.info("Rest api is starting") try: - with open(KEY_FILE, "rb") as keyfile: + with open(DATA_PATH + KEY_FILE, "rb") as keyfile: g_key = keyfile.read(32) except IOError: logger.exception("Error while reading keyfile. 
Did you forget to set a password?") raise try: - os.system("sudo systemctl restart systemd-timesyncd && sudo systemctl daemon-reload") + logger.info("Updating clock") + os.system('sudo systemctl stop ntp') + os.system('sudo ntpd -gq') + os.system('sudo systemctl start ntp') except: - logger.exception("Error while reloading timesync daemon") + pass - signal.signal(signal.SIGTERM, exit_with_grace) + signal.signal(signal.SIGTERM, _exit_with_grace) - t = Thread(target=run_flask) + t = Thread(target=_run_flask) t.setDaemon(True) t.start() @@ -570,5 +762,5 @@ def exit_with_grace(): time.sleep(1) except KeyboardInterrupt: - exit_with_grace() + _exit_with_grace(0, 0) diff --git a/scripts/bt-nap.sh b/scripts/bt-nap.sh new file mode 100644 index 0000000..35828d0 --- /dev/null +++ b/scripts/bt-nap.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + + +get_wlan_connection() { + wlan_config=`iwconfig 2>&1 | grep ESSID:off/any` +} + +get_paired_devices() { + paired_devices=`sudo bt-device -l | grep -e \(.*\) --color=never -o| cut -d'(' -f2 | cut -d')' -f1` +} + +try_pair() +{ + sudo btmgmt ssp on + sudo btmgmt connectable on + sudo btmgmt pairable on + sudo btmgmt discov on + + echo "Waiting for remote request" + counter=1 + while [ $counter -le 19 ] + do + ((counter++)) + sleep 10 + sudo bt-device -l > /dev/null 2>&1 + if [[ $? 
-eq 0 ]]; then + echo "paired with a device, exiting pairing mode" + sleep 20 + break + fi + done + sudo btmgmt discov off + sudo btmgmt pairable off + sudo btmgmt connectable off + sudo btmgmt ssp off +} + +try_connect_bt() { + get_paired_devices + +# if [[ -z "${paired_devices}" ]] || [[ $bt_connection_retries -ge 6 ]]; then +# echo "starting remote initiated pairing procedure" +# bt_connection_retries=0 +# try_pair +# fi +# +# get_paired_devices + + if [[ -z "${paired_devices}" ]]; then + echo "no paired devices" + else + while read -r mac_address; + do + echo "Connecting to ${mac_address}" + sudo killall -9 bt-network > /dev/null 2>&1 + sudo /usr/bin/bt-network -c ${mac_address} nap > /home/pi/omnipy/data/bt-nap.log + if grep -q "connected" /home/pi/omnipy/data/bt-nap.log; then + echo "Disconnected" + bt_connection_retries=0 + else + echo "Connection failed" + ((bt_connection_retries++)) + fi + done <<< "${paired_devices}" + fi +} + +sudo btmgmt power on +sleep 10 +bt_connection_retries=0 +wlan_config= +while true; +do + get_wlan_connection + if [[ ! 
-z "${wlan_config}" ]]; then + echo "no wlan connection, trying bt" + try_connect_bt + else + echo "wlan connection active, bt-nap postponed" + sleep 120 + fi +done diff --git a/scripts/bt-reset.sh b/scripts/bt-reset.sh new file mode 100644 index 0000000..2a14455 --- /dev/null +++ b/scripts/bt-reset.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hciconfig` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hcitool` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which btmgmt` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-agent` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-network` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-device` +sudo find /usr/local -name bluepy-helper -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; +sudo find /home/pi -name bluepy-helper -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; + +sudo btmgmt power off +sudo btmgmt power on + +sudo bt-device -l | grep -e \(.*\) --color=never -o| cut -d'(' -f2 | cut -d')' -f1 | while read -r mac +do + if [[ ! -z "${mac}" ]]; then + sudo bt-device -d ${mac} + sudo bt-device -r ${mac} + fi +done diff --git a/scripts/btnap.sh b/scripts/btnap.sh deleted file mode 100644 index b9ad574..0000000 --- a/scripts/btnap.sh +++ /dev/null @@ -1,6 +0,0 @@ -killall bt-network > /dev/null 2>&1 -while true -do - bt-network -c $addr nap > /dev/null 2>&1 - sleep 5 -done diff --git a/scripts/console-ui.sh b/scripts/console-ui.sh new file mode 100644 index 0000000..b9e2878 --- /dev/null +++ b/scripts/console-ui.sh @@ -0,0 +1,486 @@ +#!/bin/bash + + +OMNIPY_HOME="/home/pi/omnipy" + + + +function DoBackup(){ + + cd $OMNIPY_HOME/../ + backupfilename="omnipy_backup_"$(date +%d-%m-%y-%H%M%S)".tar.gz omnipy" + name=$(whiptail --title "Backup Omnipy setup" --inputbox "What backup file name do you want ?" 10 60 $backupfilename 3>&1 1>&2 2>&3) + exitstatus=$? 
+ if [ $exitstatus = 0 ]; then + tar -cvzf $name + echo "Backup done" + else + echo "No Backup requested" + fi +} + +function UpdateOmnipy(){ + + + if(whiptail --title "Update Omnipy" --yesno "Do you want to backup the current Omnipy setup on your pi ?" 8 45) + then + DoBackup + fi + + cd $OMNIPY_HOME + git stash + git pull -f + bash $OMNIPY_HOME/scripts/pi-update.sh +} + +function NewPODActivation(){ + + + TimeOffset='date +"%:z"' + TimeOffsetMin=120 #to be done and updated in the formula below + StartingBasal=0.05 + + + if (whiptail --title "Activate new POD" --yesno "Have you already filled the new pod and heard the 2 bips ?" 15 80) + then + cd $OMNIPY_HOME + UserTimeOffset=$(whiptail --title "Activate new POD" --inputbox "Please confirm your timezone offset from GMT in MINUTES.\n\nIt can be a negative number !!!" 10 60 $TimeOffsetMin 3>&1 1>&2 2>&3) + exitstatus=$? + if [ $exitstatus = 0 ] + then + whiptail --title "Activate new POD" --msgbox "Please wait until the POD is fully primed." 15 60 + ./omni.py activate $UserTimeOffset + + whiptail --title "Activate new POD" --msgbox "Once the POD is fully primed, place it on your body" 15 80 + + UserStartingBasal=$(whiptail --title "Activate new POD" --inputbox "Please confirm your starting basal rate" 10 60 $StartingBasal 3>&1 1>&2 2>&3) + exitstatus=$? + if [ $exitstatus = 0 ] + then + ./omni.py start $UserStartingBasal + + READPDM=$(./omni.py readpdm) + echo $READPDM + STATUS=$(echo $READPDM | jq .success) + echo $STATUS + + if [ $STATUS == "true" ] + then + whiptail --title "Activate new POD" --msgbox "POD up and running" 15 80 + MainMenu + else + whiptail --title "Activate new POD" --msgbox "A problem occured" 15 80 + fi + else + whiptail --title "Activate new POD" --msgbox "A problem occured" 15 80 + fi + else + echo "Activation aborted" + MainMenu + fi + else + whiptail --title "POD Activation" --msgbox "Activation aborted !" 
10 60 + MainMenu + fi + +} + + +function PODDeactivation(){ + + if (whiptail --title "POD Deactivation?" --yesno "Are you sure you want to deactivate your POD ?" 10 60) then + cd $OMNIPY_HOME + echo "POD deactivation" + DEACTIVATE=$(./omni.py deactivate) #to be changed by ./omni.py archive + echo $DEACTIVATE + STATUS=$(echo $DEACTIVATE | jq .success) + if [ $STATUS == "true" ] + then + whiptail --title "POD Deactivation" --msgbox "POD Deactivation Succeeded" 15 80 + else + whiptail --title "POD Deactivation" --msgbox "Deactivation Failed" 15 80 + fi + else + whiptail --title "POD Deactivation" --msgbox "POD deactivation cancelled" 10 60 + echo "Deactivation cancelled" + fi +} + + +function ConfigureRPi(){ + sudo raspi-config + MainMenu +} + + +function ConfigureBT(){ + sudo systemctl stop omnipy-pan.service + sudo systemctl disable omnipy-pan.service + echo "Removing existing bluetooth devices" + sudo btmgmt power on + sudo bt-device -l | grep -e \(.*\) --color=never -o| cut -d'(' -f2 | cut -d')' -f1 | while read -r mac + do + if [ !mac ]; then + sudo bt-device -d $mac + sudo bt-device -r $mac + fi + done + echo + echo "Activating bluetooth pairing mode" + sudo btmgmt connectable yes + sudo btmgmt discov yes + sudo btmgmt pairable yes + sudo killall bt-agent + sudo bt-agent -c NoInputNoOutput -d + echo "Bluetooth device is now discoverable" + echo + echo "Open ${bold}bluetooth settings${normal} on your phone to search for and ${bold}pair${normal} with this device" + echo "If you have already paired it on your phone, please unpair it first, then pair again" + echo + printf "Waiting for connection.." + + btdevice= + while [[ -z "$btdevice" ]] + do + printf "." 
+ sleep 1 + btdevice=`sudo bt-device -l | grep -e \(.*\)` + done + + sudo btmgmt discov no + + echo + + echo "${bold}Paired with $btdevice.${normal}" + mac=`echo $btdevice | cut -d'(' -f2 | cut -d')' -f1` + + echo + echo + echo "Please ${bold}enable bluetooth tethering${normal} on your phone if it's not already enabled" + echo "Waiting for connection." + echo "addr=$mac" > /home/pi/omnipy/scripts/btnap-custom.sh + cat /home/pi/omnipy/scripts/btnap.sh >> /home/pi/omnipy/scripts/btnap-custom.sh + sudo cp /home/pi/omnipy/scripts/omnipy-pan.service /etc/systemd/system/ + sudo systemctl enable omnipy-pan.service + sudo systemctl start omnipy-pan.service + ipaddr= + while [[ -z "$ipaddr" ]] + do + printf "." + sleep 1 + ipaddr=`sudo ip -o -4 address | grep bnep0 | grep -e inet.*/ -o | cut -d' ' -f2 | cut -d'/' -f1` + done + echo + echo + echo "${bold}Connection test succeeeded${normal}. IP address: $ipaddr" + +} + + + + + +function DeveloperMenu(){ + echo "Developer Menu" + SUBOPTION=$(whiptail --title "Advanced Settings" --menu "Choose the action you want to perform" --cancel-button "Back" 20 80 11 \ + "1" "Rig status" \ + "2" "View POD.log" \ + "3" "View omnipy.log" \ + "4" "Stop Services" \ + "5" "Restart Services" \ + "6" "Check RileyLink" \ + "7" "Configure Bluetooth" \ + "8" "Reset REST-API password" \ + "9" "Backup current Omnipy config" \ + "10" "Restore Omnipy backup" \ + "11" "Enable/Disable menu at SSH login" 3>&1 1>&2 2>&3) + + exitstatus=$? + if [ $exitstatus -ne 0 ]; then MainMenu; fi; + + + case $SUBOPTION in + 1) + cd $OMNIPY_HOME + echo "processing status..." 
+ RigStatus=$(./omni.py status) + echo $RigStatus >> rigstatus.txt + vim -R rigstatus.txt + rm rigstatus.txt + DeveloperMenu + ;; + + 2) + vim -R ~/omnipy/data/pod.log + DeveloperMenu + ;; + + 3) + vim -R ~/omnipy/data/omnipy.log + DeveloperMenu + ;; + + 4) + echo "Stop services" + sudo systemctl stop omnipy-rest.service + sudo systemctl stop omnipy-beacon.service + sudo systemctl stop omnipy-pan.service + + Services_Status= + + if $(systemctl -q is-active omnipy-rest.service) + then + Services_Status="Failed: Omnipy Service has not been stopped\n" + else + Services_Status="Success: Omnipy Service has been stopped successfully\n" + fi + + + if $(systemctl -q is-active omnipy-beacon.service) + then + Services_Status+="Failed: Omnipy-beacon Service has not been stopped\n" + else + Services_Status+="Success: Omnipy-beacon Service has been stopped successfully\n" + fi + + + if $(systemctl -q is-active omnipy-pan.service) + then + Services_Status+="Failed: Omnipy-pan Service has not been stopped" + else + Services_Status+="Success: Omnipy-pan Service has been stopped successfully" + fi + + whiptail --title "Omnipy Services stop " --msgbox "$Services_Status" 10 80 + DeveloperMenu + ;; + + 5) + echo "Restart services" + sudo systemctl restart omnipy-rest.service + sudo systemctl restart omnipy-beacon.service + sudo systemctl restart omnipy-pan.service + + + Services_Status= + + if $(systemctl -q is-active omnipy-rest.service) + then + Services_Status="Success: Omnipy Service has been restarted successfully\n" + else + Services_Status="Failed: Omnipy Service has been not been restarted successfully\n" + fi + + + if $(systemctl -q is-active omnipy-beacon.service) + then + Services_Status+="Success: Omnipy-beacon Service has been restarted successfully\n" + else + Services_Status+="Failed: Omnipy-beacon Service has not been restarted successfully\n" + fi + + + if $(systemctl -q is-active omnipy-pan.service) + then + Services_Status+="Success: Omnipy-pan Service has been 
restarted successfully" + else + Services_Status+="Failed: Omnipy-pan Service has not been restarted successfully" + fi + + whiptail --title "Omnipy Services Restart " --msgbox "$Services_Status" 10 80 + DeveloperMenu + ;; + + 6) + echo "Verify RileyLink" + python3 ~/omnipy/verify_rl.py + DeveloperMenu + ;; + 7) + echo "Configure Bluetooth" + ConfigureBT + DeveloperMenu + + ;; + 8) + echo "Reset REST-API password" + /usr/bin/python3 /home/pi/omnipy/set_api_password.py + DeveloperMenu + + ;; + + + 9) + DoBackup + + ;; + + + 10) + echo "Restore Omnipy backup - to be done" + cd ~ + ListofBackups=`for x in $(ls -1 *.gz); do echo $x "-"; done` + + if [ -z "$ListofBackups" ] + then + whiptail --title "Restore Backup" --msgbox "There are no backup to restore" 20 80 + DeveloperMenu + + else + OPTION=$(whiptail --title "Restore backup" --menu "Select the backup you want to restore:" 20 80 10 $ListofBackups 3>&1 1>&2 2>&3) + exitstatus=$? + if [ $exitstatus -ne 0 ]; then exit; fi; + + cd ~ + mv omnipy omnipy_revert + tar -xvzf $OPTION + + whiptail --title "Restore Backup" --msgbox "Your pi will now reboot, please reconnect in a minute..." 20 80 + sudo reboot + fi + ;; + + 11) + Linetoadd="ForceCommand ./omnipy/scripts/console-ui.sh" + Filetochange="/etc/ssh/sshd_config" + + if (whiptail --title "Menu autostart?" --yesno "Do you want menu to be loaded at logon ?" 
10 60) then + #Check if the ForceCommand exists and if not, modify /etc/ssh/sshd_config file by adding ForceCommand ./Scripts/console-ui.sh + + if grep -Fxq "$Linetoadd" $Filetochange + then + # code if found + echo "Already activated" + else + # code if not found + echo "Line added" + sudo sh -c "echo $Linetoadd >> $Filetochange" + sudo systemctl restart ssh + fi + else + #check if ForceCommand .$OMNIPY_HOME/scripts/console-ui.sh exits in /etc/ssh/sshd_config and it yes, remove it + + if grep -Fxq "$Linetoadd" $Filetochange + then + # code if found + echo "Delete the line" + sudo sed -i "\#$Linetoadd#d" "$Filetochange" + sudo systemctl restart ssh + else + # code if not found + echo "Already deactivated" + fi + fi + DeveloperMenu + ;; + + + esac + + +} + +function MainMenu(){ + +while true +do + +#check if Menu autostart is enabled +ExitbuttonName="Back to Shell" +AutoMenuCommand="ForceCommand ./Scripts/console-ui.sh" +sshdpath="/etc/ssh/sshd_config" + +if grep -Fxq "$AutoMenuCommand" $sshdpath + then + # code if found + ExitbuttonName="Exit" + else + # code if not found + ExitbuttonName="Back to Shell" +fi + +OPTION=$(whiptail --title "Omnipy Menu" --menu "Choose the action you want to perform" --cancel-button "$ExitbuttonName" 20 50 8 \ +"1" "Activate New Pod" \ +"2" "Deactivate Pod" \ +"3" "Configure Raspberry Pi" \ +"4" "Update Omnipy" \ +"5" "Safe Reboot" \ +"6" "Safe Shutdown" \ +"7" "Escape to Shell" \ +"8" "Advanced Settings" 3>&1 1>&2 2>&3) + +exitstatus=$? +if [ $exitstatus -ne 0 ]; then exit; fi; + +case $OPTION in + + 1) #Activate a new pod + NewPODActivation + ;; + + 2) + PODDeactivation + MainMenu + + ;; + + + 3) + ConfigureRPi + + ;; + + 4) + echo "Update Omnipy" + if(whiptail --title "Update Omnipy" --yesno "Do you want to update omnipy to the latest master branch ?" 8 45) + then + UpdateOmnipy + else + echo "Omnipy update cancelled" + fi + + ;; + + 5) + echo "Safe Reboot" + if(whiptail --title "Safe Reboot" --yesno "Do you want to reboot your pi ? 
If yes, you'll need to reconnect..." 8 45) + then + sudo reboot + else + echo "Reboot cancelled" + fi + + ;; + + 6) + echo "Safe Shutdown" + if(whiptail --title "Safe Shutdown" --yesno "Do you want to shutdown your pi ? If yes, to restart the pi, unplug and plug again the power supply..." 8 45) + then + sudo shutdown now + else + echo "Shutdown cancelled" + fi + ;; + + + 7) + echo "Escape to Shell" + if(whiptail --title "Escape to Shell" --yesno "Type exit to return to the menu" 8 45) + then + bash + else + MainMenu + fi + ;; + + 8) + DeveloperMenu + + ;; + +esac + +done +} + +MainMenu diff --git a/scripts/omnipy-beacon.service b/scripts/image/omnipy-beacon.service similarity index 70% rename from scripts/omnipy-beacon.service rename to scripts/image/omnipy-beacon.service index f426cf7..a0823b3 100644 --- a/scripts/omnipy-beacon.service +++ b/scripts/image/omnipy-beacon.service @@ -3,11 +3,11 @@ Description=Omnipy UDP broadcast receiver After=network.target [Service] -ExecStart=/usr/bin/python3 -u /home/pi/omnipy/omnipy_beacon.py +ExecStart=/home/pi/v/bin/python -u /home/pi/omnipy/omnipy_beacon.py WorkingDirectory=/home/pi/omnipy StandardOutput=inherit StandardError=inherit -Restart=on-abort +Restart=on-failure User=pi [Install] diff --git a/scripts/image/omnipy-messenger.service b/scripts/image/omnipy-messenger.service new file mode 100644 index 0000000..25da9b3 --- /dev/null +++ b/scripts/image/omnipy-messenger.service @@ -0,0 +1,18 @@ +[Unit] +Description=Omnipy Messenger +After=network.target +StartLimitIntervalSec=0 + + +[Service] +ExecStart=/home/pi/v/bin/python -u /home/pi/omnipy/omnipy_messenger_service.py +WorkingDirectory=/home/pi/omnipy +StandardOutput=inherit +StandardError=inherit +TimeoutStopSec=15 +Restart=on-failure +RestartSec=1 +User=pi + +[Install] +WantedBy=multi-user.target diff --git a/scripts/image/omnipy-mq.service b/scripts/image/omnipy-mq.service new file mode 100644 index 0000000..a5df65c --- /dev/null +++ 
b/scripts/image/omnipy-mq.service @@ -0,0 +1,18 @@ +[Unit] +Description=Omnipy MQ +After=network.target +StartLimitIntervalSec=0 + + +[Service] +ExecStart=/home/pi/v/bin/python -u /home/pi/omnipy/mq.py +WorkingDirectory=/home/pi/omnipy +StandardOutput=inherit +StandardError=inherit +TimeoutStopSec=15 +Restart=on-failure +RestartSec=90 +User=pi + +[Install] +WantedBy=multi-user.target diff --git a/scripts/omnipy-pan.service b/scripts/image/omnipy-pan.service similarity index 79% rename from scripts/omnipy-pan.service rename to scripts/image/omnipy-pan.service index 3a1bf2e..414b8a9 100644 --- a/scripts/omnipy-pan.service +++ b/scripts/image/omnipy-pan.service @@ -3,7 +3,7 @@ Description=Omnipy Bluetooth PAN service After=network.target [Service] -ExecStart=/bin/bash /home/pi/omnipy/scripts/btnap-custom.sh +ExecStart=/bin/bash /home/pi/omnipy/scripts/bt-nap.sh WorkingDirectory=/home/pi/omnipy/scripts/ StandardOutput=inherit StandardError=inherit diff --git a/scripts/omnipy.service b/scripts/image/omnipy-rest.service similarity index 53% rename from scripts/omnipy.service rename to scripts/image/omnipy-rest.service index 8404066..efe0a4d 100644 --- a/scripts/omnipy.service +++ b/scripts/image/omnipy-rest.service @@ -1,13 +1,13 @@ [Unit] Description=Omnipy Rest API -After=network.target +After=network.target omnipy-beacon.service omnipy-hotspot.service omnipy-pan.service [Service] -ExecStart=/usr/bin/python3 -u /home/pi/omnipy/restapi.py +ExecStart=/home/pi/v/bin/python -u /home/pi/omnipy/restapi.py WorkingDirectory=/home/pi/omnipy StandardOutput=inherit StandardError=inherit -TimeoutStopSec=90 +TimeoutStopSec=15 Restart=on-abort User=pi diff --git a/scripts/image/omnipy-sync.service b/scripts/image/omnipy-sync.service new file mode 100644 index 0000000..87c6f2a --- /dev/null +++ b/scripts/image/omnipy-sync.service @@ -0,0 +1,17 @@ +[Unit] +Description=Omnipy Sync +After=network.target + +[Service] +ExecStart=/home/pi/v/bin/python -u /home/pi/omnipy/dbsync.py 
+WorkingDirectory=/home/pi/omnipy +StandardOutput=inherit +StandardError=inherit +TimeoutStopSec=15 +TimeoutStopSec=15 +Restart=on-failure +RestartSec=90 +User=pi + +[Install] +WantedBy=multi-user.target diff --git a/scripts/image/rc.local b/scripts/image/rc.local new file mode 100644 index 0000000..8d983f0 --- /dev/null +++ b/scripts/image/rc.local @@ -0,0 +1,16 @@ +#!/bin/sh -e +# +# rc.local +# +# This script is executed at the end of each multiuser runlevel. +# Make sure that the script will "exit 0" on success or any other +# value on error. +# +# In order to enable or disable this script just change the execution +# bits. +# +# By default this script does nothing. + +/bin/bash /home/pi/omnipy/scripts/recovery.sh + +exit 0 diff --git a/scripts/image/recovery.key b/scripts/image/recovery.key new file mode 100644 index 0000000..35f5df4 Binary files /dev/null and b/scripts/image/recovery.key differ diff --git a/scripts/image/setup.sh b/scripts/image/setup.sh new file mode 100644 index 0000000..5a1cf8b --- /dev/null +++ b/scripts/image/setup.sh @@ -0,0 +1,151 @@ +#qemu-system-arm -kernel c:\dev\qemu-rpi-kernel\kernel-qemu-4.19.50-buster -cpu arm1176 -m 256 -M versatilepb -dtb c:\dev\qemu-rpi-kernel\versatile-pb-buster.dtb -no-reboot -serial stdio -append "root=/dev/sda2 panic=1 rootfstype=ext4 rw" -drive "file=c:\dev\omnipy.img,index=0,media=disk,format=raw" -net user,hostfwd=tcp::5022-:22 -net nic + +#!/usr/bin/env bash +sudo touch /boot/ssh + +sudo passwd pi +sudo raspi-config + +# hostname: omnipy-dev +#? 
adv, memory split, 16 +# enable predictive intf names +# timezone other/utc +# adv resize fs +# reboot + + +sudo apt update && sudo apt upgrade -y +sudo apt autoremove + +### omnipy-bare image end + +### omnipy-base image start + +sudo apt install -y screen git python3 python3-pip vim jq bluez-tools libglib2.0-dev python3-rpi.gpio ntp fake-hwclock \ +bluez-tools python3-pip python3-venv gobject-introspection libgirepository1.0-dev libcairo2-dev expect build-essential \ +libdbus-1-dev libudev-dev libical-dev libreadline-dev rpi-update + +git config --global user.email "omnipy@balya.net" +git config --global user.name "Omnipy Setup" +git clone https://github.com/winemug/omnipy.git +git clone https://github.com/winemug/bluepy.git + +mkdir -p /home/pi/omnipy/data +rm /home/pi/omnipy/data/key +cp /home/pi/omnipy/scripts/image/recovery.key /home/pi/omnipy/data/key + +### omnipy-base image end + + +### omnipy-sw image start +python3 -m pip install --user pip --upgrade +python3 -m pip install --user virtualenv --upgrade + +python3 -m venv /home/pi/v +source /home/pi/v/bin/activate +python3 -m pip install pip --upgrade +python3 -m pip install setuptools --upgrade + +cd /home/pi/bluepy +python3 setup.py build +python3 setup.py install + +#creating /home/pi/v/lib/python3.7/site-packages/bluepy-1.3.0-py3.7.egg +#Extracting bluepy-1.3.0-py3.7.egg to /home/pi/v/lib/python3.7/site-packages +#Adding bluepy 1.3.0 to easy-install.pth file +#Installing blescan script to /home/pi/v/bin +#Installing sensortag script to /home/pi/v/bin +#Installing thingy52 script to /home/pi/v/bin + +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hciconfig` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hcitool` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which btmgmt` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-agent` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-network` +sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-device` +sudo find /home/pi -name 
bluepy-helper -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; +sudo find /home/pi -name blescan -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; +sudo find /home/pi -name sensortag -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; +sudo find /home/pi -name thingy52 -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; + +python3 -m pip install rpi-gpio simplejson paho-mqtt requests crypto flask pycrypto + + +# services + +cd /home/pi/omnipy +git stash +git pull + +chmod -R 755 /home/pi/omnipy/scripts/*.sh +chmod -R 755 /home/pi/omnipy/*.py +sudo cp /home/pi/omnipy/scripts/image/rc.local /etc/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-rest.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-mq.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-beacon.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-pan.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-sync.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-messenger.service /etc/systemd/system/ + +sudo systemctl enable omnipy-rest.service +sudo systemctl enable omnipy-beacon.service +sudo systemctl enable omnipy-pan.service + +#sudo systemctl start omnipy-rest.service +#sudo systemctl start omnipy-beacon.service +#sudo systemctl start omnipy-pan.service + +#sudo touch /boot/omnipy-pwreset +#sudo touch /boot/omnipy-expandfs +#sudo touch /boot/omnipy-btreset + +sudo raspi-config +# hostname omnipy +exit + +# version update +# halt + +### clean-up via scp + +#fin + + +sudo umount /dev/sdh1 +sudo umount /dev/sdh2 + +#sudo gparted /dev/sdh +#shrink with /g/parted 3192MiB + +sudo e2fsck -f /dev/sdh2 +sudo resize2fs /dev/sdh2 3G + +sudo fdisk /dev/sdh + +-p + +#Device Boot Start End Sectors Size Id Type +#/dev/sdh1 8192 532479 524288 256M c W95 FAT32 (LBA) +#/dev/sdh2 532480 124735487 124203008 59.2G 83 Linux + +-d 2 +-n p 2 532480 +3145728K -N +-p + +#Device Boot Start End Sectors Size Id Type 
+#/dev/sdh1 8192 532479 524288 256M c W95 FAT32 (LBA) +#/dev/sdh2 532480 6823935 6291456 3G 83 Linux + +-w + +sudo e2fsck -f /dev/sdh2 + +sudo mount /dev/sdh2 /mnt +sudo dcfldd if=/dev/zero of=/mnt/zero.txt +sudo rm /mnt/zero.txt +sudo sync +sudo umount /dev/sdh2 +sudo sync + +sudo dcfldd if=/dev/sdh of=omnipy.img bs=512 count=6389760 +zip -9 omnipy.img.zip omnipy.img diff --git a/scripts/pi-setup.sh b/scripts/pi-setup.sh deleted file mode 100644 index a7d3218..0000000 --- a/scripts/pi-setup.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/bash - -bold=$(tput bold) -normal=$(tput sgr0) -echo -echo Welcome to ${bold}omnipy${normal} installation script -echo This script will aid you in configuring your raspberry pi to run omnipy -echo - -if [[ -d /home/pi/omnipy ]] -then - -echo -read -p "You seem to have omnipy already installed, do you want to reinstall it?" -r -echo - -if [[ $REPLY =~ ^[Yy]$ ]] -then -sudo systemctl stop omnipy.service -sudo systemctl stop omnipy-beacon.service -sudo systemctl stop omnipy-pan.service - -sudo systemctl disable omnipy.service -sudo systemctl disable omnipy-beacon.service -sudo systemctl disable omnipy-pan.service - -fi - -else -read -p "Press Enter to continue..." -fi - -echo -echo ${bold}Step 1/11: ${normal}Updating package repositories -sudo apt update -if [[ $? > 0 ]] -then - echo "Warning: updating package repositories failed on first attempt - retrying" - sudo apt update || ((echo "Error: updating package repositories failed on second attempt - aborting" && exit)) - echo "Retry successful - updating package repositories suceeded on second attempt" -fi - -echo -echo ${bold}Step 2/11: ${normal}Upgrading existing packages -sudo apt upgrade -y -if [[ $? 
> 0 ]] -then - echo "Warning: updating existing packages failed on first attempt - retrying" - sudo apt upgrade -y || ((echo "Error: updating existing packages failed on second attempt - aborting" && exit)) - echo "Retry successful - updating existing packages suceeded on second attempt" -fi - -sudo systemctl disable omnipy.service > /dev/null 2>&1 -sudo systemctl disable omnipy-beacon.service > /dev/null 2>&1 -sudo systemctl disable omnipy-pan.service > /dev/null 2>&1 -sudo systemctl stop omnipy-pan.service > /dev/null 2>&1 -sudo systemctl stop omnipy-beacon.service > /dev/null 2>&1 -sudo systemctl stop omnipy.service > /dev/null 2>&1 - -if [[ ! -d /home/pi/omnipy ]] -then -echo -echo ${bold}Step 3/11: ${normal}Downloading and installing omnipy -cd /home/pi -git clone https://github.com/winemug/omnipy.git -cd /home/pi/omnipy -else -echo -echo ${bold}Step 3/11: ${normal}Updating omnipy -cd /home/pi/omnipy -git config --global user.email "omnipy@balya.net" -git config --global user.name "Omnipy Setup" -git stash -git pull -fi -mkdir /home/pi/omnipy/data > /dev/null 2>&1 -chmod 755 /home/pi/omnipy/omni.py - -echo -echo ${bold}Step 4/11: ${normal}Installing dependencies -sudo apt install -y bluez-tools python3 python3-pip git build-essential libglib2.0-dev vim || ((echo "Error: installing dependencies failed - aborting" && exit)) -sudo pip3 install simplejson || ((echo "Error: installing dependencies failed - aborting" && exit)) -sudo pip3 install Flask || ((echo "Error: installing dependencies failed - aborting" && exit)) -sudo pip3 install cryptography || ((echo "Error: installing dependencies failed - aborting" && exit)) -sudo pip3 install requests || ((echo "Error: installing dependencies failed - aborting" && exit)) - -echo -echo ${bold}Step 5/11: ${normal}Configuring and installing bluepy -cd /home/pi -sudo rm -rf bluepy -git clone https://github.com/winemug/bluepy.git -cd bluepy -python3 ./setup.py build -sudo python3 ./setup.py install -cd /home/pi/omnipy - 
-echo -echo ${bold}Step 6/11: ${normal}Enabling bluetooth management for users -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hciconfig` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hcitool` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which btmgmt` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-agent` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-network` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-device` -sudo find / -name bluepy-helper -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; - -echo -echo ${bold}Step 7/11: ${normal}Safe shutdown -echo -read -p "Are you using/planning a LipoShim to safely power down the pi if you get a low battery? Press y if so to install the relevant service, or n if you are using a USB power pack (y/n) " -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then - curl https://raw.githubusercontent.com/dexdan/clean-shutdown/master/zerolipo_omnipy | bash -fi - -echo -echo ${bold}Step 8/11: ${normal}Omnipy HTTP API Password configuration -/usr/bin/python3 /home/pi/omnipy/set_api_password.py - -echo -echo ${bold}Step 9/11: ${normal}RileyLink test -echo -echo This step will test if your RileyLink device is connectable and has the -echo correct firmware version installed. -echo -read -p "Do you want to test the Rileylink now? (y/n) " -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then - /usr/bin/python3 /home/pi/omnipy/verify_rl.py -fi - -echo ${bold}Step 10/11: ${normal}Setting up bluetooth personal area network -echo -read -p "Do you want to set up a bluetooth personal area network? 
(y/n) " -r -if [[ $REPLY =~ ^[Yy]$ ]] -then - echo - echo "Removing existing bluetooth devices" - sudo btmgmt power on - sudo bt-device -l | grep -e \(.*\) --color=never -o| cut -d'(' -f2 | cut -d')' -f1 | while read -r mac - do - if [ !mac ]; then - sudo bt-device -d $mac - sudo bt-device -r $mac - fi - done - echo - echo "Activating bluetooth pairing mode" - sudo btmgmt connectable yes - sudo btmgmt discov yes - sudo btmgmt pairable yes - sudo killall bt-agent - sudo bt-agent -c NoInputNoOutput -d - echo "Bluetooth device is now discoverable" - echo - echo "Open ${bold}bluetooth settings${normal} on your phone to search for and ${bold}pair${normal} with this device" - echo "If you have already paired it on your phone, please unpair it first, then pair again" - echo - printf "Waiting for connection.." - - btdevice= - while [[ -z "$btdevice" ]] - do - printf "." - sleep 1 - btdevice=`sudo bt-device -l | grep -e \(.*\)` - done - - sudo btmgmt discov no - - echo - - echo "${bold}Paired with $btdevice.${normal}" - mac=`echo $btdevice | cut -d'(' -f2 | cut -d')' -f1` - - echo - echo - echo "Please ${bold}enable bluetooth tethering${normal} on your phone if it's not already enabled" - echo "Waiting for connection." - echo "addr=$mac" > /home/pi/omnipy/scripts/btnap-custom.sh - cat /home/pi/omnipy/scripts/btnap.sh >> /home/pi/omnipy/scripts/btnap-custom.sh - sudo cp /home/pi/omnipy/scripts/omnipy-pan.service /etc/systemd/system/ - sudo systemctl enable omnipy-pan.service - sudo systemctl start omnipy-pan.service - ipaddr= - while [[ -z "$ipaddr" ]] - do - printf "." - sleep 1 - ipaddr=`sudo ip -o -4 address | grep bnep0 | grep -e inet.*/ -o | cut -d' ' -f2 | cut -d'/' -f1` - done - echo - echo - echo "${bold}Connection test succeeeded${normal}. 
IP address: $ipaddr" -fi - -echo -echo ${bold}Step 11/11: ${normal}Creating and starting omnipy services - -sudo chown -R pi.pi /home/pi/bluepy -sudo chown -R pi.pi /home/pi/omnipy - -sudo cp /home/pi/omnipy/scripts/omnipy.service /etc/systemd/system/ -sudo cp /home/pi/omnipy/scripts/omnipy-beacon.service /etc/systemd/system/ -sudo systemctl enable omnipy.service -sudo systemctl enable omnipy-beacon.service -sudo systemctl start omnipy.service -sudo systemctl start omnipy-beacon.service - -echo -echo ${bold}Setup completed.${normal} - diff --git a/scripts/pi-update.sh b/scripts/pi-update.sh deleted file mode 100644 index 3137a1d..0000000 --- a/scripts/pi-update.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/bin/bash - -if [[ ! -d /home/pi/omnipy ]] -then - -echo -echo "You don't seem to have omnipy installed, please run the pi-setup.sh script first" -exit - -fi - -bold=$(tput bold) -normal=$(tput sgr0) -echo -echo "Welcome to ${bold}omnipy${normal} update script" -echo "This script will let you reconfigure omnipy as in the setup script" -echo -echo "Stopping omnipy services" -sudo systemctl disable omnipy.service -sudo systemctl disable omnipy-beacon.service -sudo systemctl disable omnipy-pan.service -sudo systemctl stop omnipy.service -sudo systemctl stop omnipy-beacon.service -sudo systemctl stop omnipy-pan.service - - -read -p "Do you want update to the latest version in the github repository? " -r -echo - -if [[ $REPLY =~ ^[Yy]$ ]] -then -echo "Updating omnipy" -cd /home/pi/omnipy -git config --global user.email "omnipy@balya.net" -git config --global user.name "Omnipy Setup" -git stash -git pull -fi - -read -p "Do you want reinstall the dependencies? 
" -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then -echo Installing dependencies -cd /home/pi/omnipy -sudo apt install -y bluez-tools python3 python3-pip git build-essential libglib2.0-dev vim -sudo pip3 install simplejson -sudo pip3 install Flask -sudo pip3 install cryptography -sudo pip3 install requests -echo -echo Configuring and installing bluepy -cd /home/pi -sudo rm -rf bluepy -git clone https://github.com/winemug/bluepy.git -cd bluepy -python3 ./setup.py build -sudo python3 ./setup.py install -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hciconfig` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which hcitool` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which btmgmt` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-agent` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-network` -sudo setcap 'cap_net_raw,cap_net_admin+eip' `which bt-device` -sudo find / -name bluepy-helper -exec setcap 'cap_net_raw,cap_net_admin+eip' {} \; -fi - -read -p "Do you want reconfigure the API password? " -r -if [[ $REPLY =~ ^[Yy]$ ]] -then -cd /home/pi/omnipy -/usr/bin/python3 /home/pi/omnipy/set_api_password.py -fi - -read -p "Do you want test the RileyLink? " -r -if [[ $REPLY =~ ^[Yy]$ ]] -then -cd /home/pi/omnipy -/usr/bin/python3 /home/pi/omnipy/verify_rl.py -fi - -read -p "Do you want reconfigure bluetooth personal area network? 
" -r -if [[ $REPLY =~ ^[Yy]$ ]] -then - sudo systemctl stop omnipy-pan.service > /dev/null 2>&1 - sudo systemctl disable omnipy-pan.service > /dev/null 2>&1 - echo - echo "Removing existing bluetooth devices" - sudo btmgmt power on - sudo bt-device -l | grep -e \(.*\) --color=never -o| cut -d'(' -f2 | cut -d')' -f1 | while read -r mac - do - if [ !mac ]; then - sudo bt-device -d $mac - sudo bt-device -r $mac - fi - done - echo - echo "Activating bluetooth pairing mode" - sudo btmgmt connectable yes - sudo btmgmt discov yes - sudo btmgmt pairable yes - sudo killall bt-agent - sudo bt-agent -c NoInputNoOutput -d - echo "Bluetooth device is now discoverable" - echo - echo "Open ${bold}bluetooth settings${normal} on your phone to search for and ${bold}pair${normal} with this device" - echo "If you have already paired it on your phone, please unpair it first, then pair again" - echo - printf "Waiting for connection.." - - btdevice= - while [[ -z "$btdevice" ]] - do - printf "." - sleep 1 - btdevice=`sudo bt-device -l | grep -e \(.*\)` - done - - sudo btmgmt discov no - - echo - - echo "${bold}Paired with $btdevice.${normal}" - mac=`echo $btdevice | cut -d'(' -f2 | cut -d')' -f1` - - echo - echo - echo "Please ${bold}enable bluetooth tethering${normal} on your phone if it's not already enabled" - echo "Waiting for connection." - echo "addr=$mac" > /home/pi/omnipy/scripts/btnap-custom.sh - cat /home/pi/omnipy/scripts/btnap.sh >> /home/pi/omnipy/scripts/btnap-custom.sh - sudo cp /home/pi/omnipy/scripts/omnipy-pan.service /etc/systemd/system/ - sudo systemctl enable omnipy-pan.service - sudo systemctl start omnipy-pan.service - ipaddr= - while [[ -z "$ipaddr" ]] - do - printf "." - sleep 1 - ipaddr=`sudo ip -o -4 address | grep bnep0 | grep -e inet.*/ -o | cut -d' ' -f2 | cut -d'/' -f1` - done - echo - echo - echo "${bold}Connection test succeeeded${normal}. 
IP address: $ipaddr" - sudo systemctl stop omnipy-pan.service -fi - -echo -echo Updating service scripts and restarting services - -sudo chown -R pi.pi /home/pi/bluepy -sudo chown -R pi.pi /home/pi/omnipy - -if [[ -f /home/pi/omnipy/scripts/btnap-custom.sh ]] -then - sudo cp /home/pi/omnipy/scripts/omnipy-pan.service /etc/systemd/system/ - sudo systemctl enable omnipy-pan.service - sudo systemctl start omnipy-pan.service -fi - -sudo cp /home/pi/omnipy/scripts/omnipy.service /etc/systemd/system/ -sudo cp /home/pi/omnipy/scripts/omnipy-beacon.service /etc/systemd/system/ -sudo systemctl enable omnipy.service -sudo systemctl enable omnipy-beacon.service -sudo systemctl start omnipy.service -sudo systemctl start omnipy-beacon.service -sudo systemctl daemon-reload -echo -echo ${bold}Configuration updated.${normal} diff --git a/scripts/recovery.sh b/scripts/recovery.sh new file mode 100644 index 0000000..dcbaf0f --- /dev/null +++ b/scripts/recovery.sh @@ -0,0 +1,41 @@ +#!/bin/bash +#FW_UPDATE_FILE=/boot/omnipy-fwupdate +PW_RESET_FILE=/boot/omnipy-pwreset +BT_RESET_FILE=/boot/omnipy-btreset +UPGRADE_FILE=/boot/omnipy-upgrade +EXPAND_FS=/boot/omnipy-expandfs +WLAN_INTERFACE=wlan0 + +if [[ -f ${EXPAND_FS} ]]; then + /bin/rm ${EXPAND_FS} + raspi-config --expand-rootfs + shutdown -r now +fi + +#if [[ -f ${FW_UPDATE_FILE} ]]; then +# /bin/rm ${FW_UPDATE_FILE} +# /bin/rm /boot/.firmware_revision +# cp /home/pi/omnipy/scripts/image/rpiupdate.sh /usr/bin/rpiupdate +# ROOT_PATH=/ BOOT_PATH=/boot SKIP_DOWNLOAD=1 SKIP_REPODELETE=1 SKIP_BACKUP=1 UPDATE_SELF=0 RPI_REBOOT=1 rpi-update +# shutdown -r now +#fi + +if [[ -f ${PW_RESET_FILE} ]]; then + echo "pi:omnipy" | chpasswd + mkdir -p /home/pi/omnipy/data + rm /home/pi/omnipy/data/key + cp /home/pi/omnipy/scripts/image/recovery.key /home/pi/omnipy/data/key + /bin/rm ${PW_RESET_FILE} + /sbin/shutdown -r now +fi + +if [[ -f ${BT_RESET_FILE} ]]; then + su -c "/bin/bash /home/pi/omnipy/scripts/bt-reset.sh &" pi + /bin/rm ${BT_RESET_FILE} 
+ /sbin/shutdown -r now +fi + +if [[ -f ${UPGRADE_FILE} ]]; then + su -c "/bin/bash /home/pi/omnipy/scripts/update.sh &" pi + /sbin/shutdown -r now +fi \ No newline at end of file diff --git a/scripts/start-beacon.sh b/scripts/start-beacon.sh new file mode 100644 index 0000000..f77b1c7 --- /dev/null +++ b/scripts/start-beacon.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +source /home/pi/v/bin/activate +cd /home/pi/omnipy +python3 -u /home/pi/omnipy/omnipy_beacon.py diff --git a/scripts/start-mq.sh b/scripts/start-mq.sh new file mode 100644 index 0000000..e447261 --- /dev/null +++ b/scripts/start-mq.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +source /home/pi/v/bin/activate +cd /home/pi/omnipy +python3 -u /home/pi/omnipy/mq.py diff --git a/scripts/start-restapi.sh b/scripts/start-restapi.sh new file mode 100644 index 0000000..31e949d --- /dev/null +++ b/scripts/start-restapi.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +source /home/pi/v/bin/activate +cd /home/pi/omnipy +python3 -u /home/pi/omnipy/restapi.py diff --git a/scripts/start-sync.sh b/scripts/start-sync.sh new file mode 100644 index 0000000..ad38051 --- /dev/null +++ b/scripts/start-sync.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +source /home/pi/v/bin/activate +cd /home/pi/omnipy +python3 -u /home/pi/omnipy/dbsync.py diff --git a/scripts/update-finalize.sh b/scripts/update-finalize.sh new file mode 100644 index 0000000..b54480d --- /dev/null +++ b/scripts/update-finalize.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +echo +echo Omnipy updater started + +echo Updating raspbian packages +sudo apt update && sudo apt upgrade -y && sudo apt autoremove -y + +echo Updating python environment +python3 -m pip install --user pip --upgrade +python3 -m pip install --user virtualenv --upgrade +python3 -m venv --upgrade /home/pi/v + +source /home/pi/v/bin/activate +python3 -m pip install pip setuptools --upgrade + +echo Installing and updating libraries for omnipy +python3 -m pip install -r /home/pi/omnipy/requirements.txt + +#echo Updating 
omnipy customized bluepy library +#cd /home/pi/bluepy +#git stash +#git pull +#python3 setup.py build +#python3 setup.py install + +deactivate + +sudo cp /home/pi/omnipy/scripts/image/rc.local /etc/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-pan.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-rest.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-beacon.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-mq.service /etc/systemd/system/ +sudo cp /home/pi/omnipy/scripts/image/omnipy-sync.service /etc/systemd/system/ + +sudo rm /boot/omnipy-upgrade +echo +echo Configuration updated. +echo +echo Rebooting +echo +sleep 3 +sudo reboot diff --git a/scripts/update.sh b/scripts/update.sh new file mode 100644 index 0000000..12d6c12 --- /dev/null +++ b/scripts/update.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +echo +echo "Stopping omnipy services" +sudo systemctl stop omnipy-beacon.service +sudo systemctl stop omnipy-mq.service +sudo systemctl stop omnipy-rest.service +sudo systemctl stop omnipy-pan.service +sudo systemctl stop omnipy-sync.service + +echo "Updating omnipy" +cd /home/pi/omnipy +git config --global user.email "omnipy@balya.net" +git config --global user.name "Omnipy Setup" +git stash +git pull + +/bin/bash /home/pi/omnipy/scripts/update-finalize.sh diff --git a/set_api_password.py b/set_api_password.py old mode 100755 new mode 100644 index 78de5ef..b127db4 --- a/set_api_password.py +++ b/set_api_password.py @@ -1,4 +1,5 @@ -#!/usr/bin/python3 +#!/home/pi/v/bin/python3 + import hashlib from podcomm.definitions import * @@ -20,15 +21,15 @@ def main(): salt = "bythepowerofgrayskull".encode("utf-8") hash_obj = hashlib.sha256(password + salt) key = hash_obj.digest() - with open(KEY_FILE, "w+b") as keyfile: + with open(DATA_PATH + KEY_FILE, "w+b") as keyfile: keyfile.write(bytes(key)) break except Exception as e: getLogger().error("Error while creating and saving password: %s" % e) raise 
print("Password has been set.") - print("Restarting omnipy.service for changes to take effect") - os.system("sudo systemctl restart omnipy.service") + print("Restarting omnipy-rest.service for changes to take effect") + os.system("sudo systemctl restart omnipy-rest.service") return diff --git a/settings-example.json b/settings-example.json new file mode 100644 index 0000000..d5ab1ab --- /dev/null +++ b/settings-example.json @@ -0,0 +1,11 @@ +{ + "mongo_url": "mongodb://username:password@internetaddress:portnumber/nightscout?authSource=nightscout&ssl=false", + "mongo_collection": "omnipy", + "mqtt_clientid": "secret_client_id_that_is_let_through_by_the_mqtt_server", + "mqtt_command_topic": "omnipy_cmd", + "mqtt_host": "internetaddress", + "mqtt_port": 1883, + "mqtt_response_topic": "omnipy_response", + "mqtt_json_topic": "omnipy_json", + "py/object": "__main__.OmnipyConfiguration" +} \ No newline at end of file diff --git a/t1.py b/t1.py new file mode 100644 index 0000000..62efe26 --- /dev/null +++ b/t1.py @@ -0,0 +1,74 @@ +import time +from decimal import Decimal +import datetime as dt + +from podcomm.definitions import configureLogging, getLogger, get_packet_logger +from podcomm.pdm import Pdm +from podcomm.pod import Pod +from omnipy_podsession import PodSession, get_pod_session + + +def print_pod_status(ps: PodSession): + ts_now = time.time() + print(f"Id: {ps.pod_id}") + print(f"Running {dt.timedelta(seconds=int(ts_now - ps.activation_ts))}") + print(f"Remaining {dt.timedelta(seconds=80*60*60 + int(ps.activation_ts-ts_now))}") + print("-------------------------------------") + if len(ps.temp_basals) == 0 or ps.temp_basals[-1][1] < ts_now: + print(f"Scheduled basal running at {ps.basal_rate * ps.precision:.2f}U/h") + else: + temp_start, temp_end, temp_rate = ps.temp_basals[-1] + temp_rate = temp_rate * ps.precision + temp_running_for = int(ts_now - temp_start) + temp_remaining= int(temp_end - ts_now) + + print(f"Temp basal running at {temp_rate:.2f}U/h, active 
{dt.timedelta(seconds=temp_running_for)} / {dt.timedelta(seconds=temp_remaining+temp_running_for)}") + + if len(ps.boluses) == 0: + print(f"Bolus not running") + else: + bolus_start, bolus_ticks, bolus_interval = ps.boluses[-1] + bolus_amount = bolus_ticks * ps.precision + bolus_end = bolus_ticks * bolus_interval + bolus_start + if bolus_end <= ts_now: + print(f"Bolus not running") + else: + bolus_remaining_time = int(bolus_end - ts_now) + bolus_delivered = int((ts_now - bolus_start) / bolus_interval) * ps.precision + bolus_remaining = bolus_amount - bolus_delivered + print(f"Bolus running with interval {bolus_interval}s, delivered: {bolus_delivered:.2f}U / {bolus_amount:.2f}U, remaining {dt.timedelta(seconds=bolus_remaining_time)}") + print("-------------------------------------") + last_ts, last_minute, last_delivered, last_undelivered, last_reservoir = ps.last_entry + print(f"Last status at {dt.datetime.fromtimestamp(last_ts):%d-%b %H:%M:%S}") + print(f"Delivered at status time: {last_delivered:.2f}U") + print(f"To deliver at status time: {last_undelivered:.2f}U") + if last_reservoir <= 51.0: + print(f"Remaining at status time: {last_reservoir:.2f}U") + else: + print(f"Remaining at status time: {200 - last_delivered - last_undelivered:.2f}U (estimated)") + + +configureLogging() +logger = getLogger(with_console=True) +get_packet_logger(with_console=True) + +# ps = get_pod_session("/home/pi/omnipy/data/pod.db") +# # for text, ts, d, nd, r in ps.activity_log: +# # dts = dt.datetime.fromtimestamp(ts) +# # +# # print(f'{dts:%d-%b %H:%M:%S} {d}\t{nd}\t{r}\t{text}') +# +# print_pod_status(ps) + +pod = Pod.Load("/home/pi/omnipy/data/pod.json", "/home/pi/omnipy/data/pod.db") +pdm = Pdm(pod) +pdm.start_radio() +while True: + try: + pdm.update_status(2) + print(f'********************************RSSI: {pod.radio_rssi} LG: {pod.radio_low_gain}') + except Exception as e: + print(e) +pdm.stop_radio() + + diff --git a/tm.py b/tm.py new file mode 100644 index 0000000..ff84285 
--- /dev/null +++ b/tm.py @@ -0,0 +1,108 @@ +import simplejson as json +import time +import os + +from google.api_core.exceptions import AlreadyExists +from google.cloud import pubsub_v1 + + +def get_now(): + return int(time.time() * 1000) + + +os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/omnipy/google-settings.json" +subscriber = pubsub_v1.SubscriberClient() +sub_topic_path = subscriber.topic_path('omnicore17', 'py-rsp') +subscription_path = subscriber.subscription_path('omnicore17', 'sub-pyrsp-tmop') +try: + subscriber.create_subscription(subscription_path, sub_topic_path, ack_deadline_seconds=10) +except AlreadyExists: + pass + +publisher = pubsub_v1.PublisherClient( + batch_settings=pubsub_v1.types.BatchSettings( + max_bytes=4096, + max_latency=5, + ), + client_config={ + "interfaces": { + "google.pubsub.v1.Publisher": { + "retry_params": { + "messaging": { + 'total_timeout_millis': 60000, # default: 600000 + } + } + } + } + }, + publisher_options=pubsub_v1.types.PublisherOptions( + flow_control=pubsub_v1.types.PublishFlowControl( + message_limit=1000, + byte_limit=1024 * 64, + limit_exceeded_behavior=pubsub_v1.types.LimitExceededBehavior.BLOCK, + ))) +publish_topic = publisher.topic_path('omnicore17', 'py-cmd') + + +def get_response(req: {}) -> {}: + print(f'Sending request: {req}') + publisher.publish(publish_topic, json.dumps(req).encode('UTF-8')) + + print(f"Waiting for response") + while True: + response = subscriber.pull(subscription_path, max_messages=100) + + for msg in response.received_messages: + rsp = json.loads(bytes.decode(msg.message.data, encoding='UTF-8')) + rsp_req = rsp['request'] + if rsp_req['id'] == req['id']: + print(f'!matched: {msg}') + subscriber.acknowledge(subscription_path, ack_ids=[msg.ack_id]) + return rsp + else: + print(f'no match: {rsp_req["id"]}') + subscriber.acknowledge(subscription_path, ack_ids=[msg.ack_id]) + # subscriber.modify_ack_deadline(subscription_path, [msg.ack_id], 0) + + +def get_last_state() -> int: 
+ response = get_response({ + 'type': 'last_status', + 'id': get_now(), + 'expiration': None, + 'state': None + }) + return response['state'] + + +def status(): + return get_response({ + 'type': 'update_status', + 'id': get_now(), + 'expiration': get_now() + 120 * 1000, + 'state': get_last_state(), + }) + + +def bolus(ticks: int, interval: int): + return get_response( + { + 'type': 'bolus', + 'id': get_now(), + 'expiration': None, + 'state': get_last_state(), + 'parameters': { + 'ticks': ticks, + 'interval': interval + } + }) + + +print(get_response({ + 'type': 'cancel_temp_basal', + 'id': get_now(), + 'expiration': get_now() + 120 * 1000, + 'state': None, +})) +subscriber.close() +publisher.stop() diff --git a/verify_rl.py b/verify_rl.py index 6c39863..b9797bd 100644 --- a/verify_rl.py +++ b/verify_rl.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/home/pi/v/bin/python3 from podcomm.pr_rileylink import RileyLink from podcomm.definitions import * diff --git a/version.py b/version.py new file mode 100644 index 0000000..a6ae694 --- /dev/null +++ b/version.py @@ -0,0 +1,5 @@ +#!/home/pi/v/bin/python3 +from podcomm.definitions import API_VERSION_MAJOR, API_VERSION_MINOR, API_VERSION_REVISION, API_VERSION_BUILD + + +print("%d.%d.%d.%d" % (API_VERSION_MAJOR, API_VERSION_MINOR, API_VERSION_REVISION, API_VERSION_BUILD)) \ No newline at end of file