From 9b9de016c06b685d5e420595dbd78fa14d833467 Mon Sep 17 00:00:00 2001 From: Patryk Niemiec Date: Sun, 11 Dec 2022 18:22:32 +0100 Subject: [PATCH 1/7] Add new simulation client for camera stream --- cameraStream/SimulationWAPIStreamClient.py | 10 +- .../Simulation/okon_sim_client.py | 391 ++++++++++++++++++ main_GUI.py | 12 +- 3 files changed, 405 insertions(+), 8 deletions(-) create mode 100644 communicationThreads/Simulation/okon_sim_client.py diff --git a/cameraStream/SimulationWAPIStreamClient.py b/cameraStream/SimulationWAPIStreamClient.py index 37ebaa4..acf0884 100644 --- a/cameraStream/SimulationWAPIStreamClient.py +++ b/cameraStream/SimulationWAPIStreamClient.py @@ -5,7 +5,7 @@ import cv2 import numpy as np -import communicationThreads.Simulation.simulationClient as sc +import communicationThreads.Simulation.simulationClient2 as sc import variable from cameraStream.stream import cameraStream @@ -21,10 +21,10 @@ class SimulationWAPIStreamClient(cameraStream): __fi_x = (math.pi - h_fov) / 2 __fi_y = 2 * math.pi - v_fov / 2 - def __init__(self): + def __init__(self, client): threading.Thread.__init__(self) self.frame = None - self.client = sc.SimulationClient() + self.client = client def run(self): while self.active: @@ -36,8 +36,8 @@ def getFrame(self): return self.frame def receive_frame(self): - val = self.client.get_stream_frame() - if val != None: + val = self.client.okon.video + if val is not None: self.frame = val def setFov(self, h_fov, v_fov): diff --git a/communicationThreads/Simulation/okon_sim_client.py b/communicationThreads/Simulation/okon_sim_client.py new file mode 100644 index 0000000..a6108cd --- /dev/null +++ b/communicationThreads/Simulation/okon_sim_client.py @@ -0,0 +1,391 @@ +import json +import socket +import time +from queue import Queue +from threading import Thread + + +class PacketType: # PacketType.PING + SET_MTR = 0xA0 + ARM_MTR = 0xA1 + DISARM_MTR = 0xA2 + SET_CONTROL_MODE = 0xA3 + SET_ACRO = 0xA4 + SET_STABLE = 0xA5 + SET_PID = 
0xA6 + GET_SENS = 0xB0 + GET_DEPTH = 0xB1 + GET_DEPTH_BYTES = 0xB2 + GET_VIDEO_BYTES = 0xB3 + GET_VIDEO = 0xB4 + SET_SIM = 0xC0 + ACK = 0xC1 + SET_ORIEN = 0xC3 + RST_SIM = 0xC4 + PING = 0xC5 + GET_CPS = 0xC6 + HIT_NGZ = 0xC7 + HIT_FZ = 0xC8 + CHK_AP = 0xC9 + ERROR = 0xCA + REC_STRT = 0xD0 + REC_ST = 0xD1 + REC_RST = 0xD2 + GET_REC = 0xD3 + GET_DETE = 0xDE + + def get(val: int) -> str: # PacketType.get(0xA5) + for k, v in vars(PacketType).items(): + if v == val: + return k + + +class PacketFlag: # PacketType.PING + NONE = 0 + SERVER_ECHO = 1 + DO_NOT_LOG_PACKET = 2 + TEST = 128 + + def get(val: int) -> str: # PacketFlag.get(1|2) + flags = "" + if val == 0: + return "NONE " + if val & PacketFlag.SERVER_ECHO: + flags += "SERVER_ECHO " + if val & PacketFlag.DO_NOT_LOG_PACKET: + flags += "DO_NOT_LOG_PACKET " + if val & PacketFlag.TEST: + flags += "TEST " + return flags + + +class OkonSim: + def __init__(self, okon_client) -> None: + self._okon_client = okon_client + self.sens = { + "imu": { + "rot": {"x": 0, "y": 0, "z": 0}, + "rotSpeed": {"x": 0, "y": 0, "z": 0}, + "rotAccel": {"x": 0, "y": 0, "z": 0}, + "accel": {"x": 0, "y": 0, "z": 0}, + }, + "baro": 0, + "detection": [], + } + self.control = { + "mode": "unknown", + "stable": { + "rot": {"x": 0, "y": 0, "z": 0}, + "vel": {"x": 0, "y": 0, "z": 0}, + "depth": 1.3, + }, + "acro": { + "rotSpeed": {"x": 0, "y": 0, "z": 0}, + "vel": {"x": 0, "y": 0, "z": 0}, + }, + "manual": { + "FLH": 0, + "FLV": 0, + "BLV": 0, + "BLH": 0, + "FRH": 0, + "FRV": 0, + "BRV": 0, + "BRH": 0, + }, + } + self.orien = {"pos": {"x": 0, "y": 0, "z": 0}, "rot": {"x": 0, "y": 0, "z": 0}} + self.pids = None # pids get from syncing with the server + self.video = None + self.depth = None + + def set_depth(self, depth: float, add: bool = False) -> None: + old_depth = self.control["stable"]["depth"] + if add: + self.control["stable"]["depth"] += depth + else: + self.control["stable"]["depth"] = depth + if old_depth != self.control["stable"]["depth"]: + 
self._okon_client.send( + PacketType.SET_STABLE, + PacketFlag.DO_NOT_LOG_PACKET, + json.dumps(self.control["stable"]), + ) + + def set_stable_vel(self, x: float = None, y: float = None, z: float = None) -> None: + if x is not None: + self.control["stable"]["vel"]["x"] = x + if y is not None: + self.control["stable"]["vel"]["y"] = y + if z is not None: + self.control["stable"]["vel"]["z"] = z + self._okon_client.send( + PacketType.SET_STABLE, + PacketFlag.DO_NOT_LOG_PACKET, + json.dumps(self.control["stable"]), + ) + + def arm_motors(self) -> None: + self._okon_client.send(PacketType.ARM_MTR, PacketFlag.NONE) + + def disarm_motors(self) -> None: + self._okon_client.send(PacketType.DISARM_MTR, PacketFlag.NONE) + + def setMode(self, mode: str) -> None: + self.control["mode"] = mode + self._okon_client.send(PacketType.SET_CONTROL_MODE, PacketFlag.NONE, mode) + + def reachedTargetRotation(self, delta): + return ( + angle_difference(self.control["stable"]["rot"]["x"], self.sens["imu"]["rot"]["x"]) < delta + and angle_difference(self.control["stable"]["rot"]["y"], self.sens["imu"]["rot"]["y"]) < delta + and angle_difference(self.control["stable"]["rot"]["z"], self.sens["imu"]["rot"]["z"]) < delta + ) + + def reachedTargetDepth(self, delta): + return abs(self.control["stable"]["depth"] - self.sens["baro"] / 1000 / 9.81) < delta + + def get_detection(self, className: str): + return list( + filter( + lambda d: d["className"] == className and d["visibleInFrame"], + self.sens["detection"], + ) + ) + + def set_stable_rot(self, x: float = None, y: float = None, z: float = None, add=False) -> None: + if x is not None: + if add: + self.control["stable"]["rot"]["x"] += x + else: + self.control["stable"]["rot"]["x"] = x + if y is not None: + if add: + self.control["stable"]["rot"]["y"] += y + else: + self.control["stable"]["rot"]["y"] = y + if z is not None: + if add: + self.control["stable"]["rot"]["z"] += z + else: + self.control["stable"]["rot"]["z"] = z + 
self.control["stable"]["rot"] = angle_norm(self.control["stable"]["rot"]) + self._okon_client.send( + PacketType.SET_STABLE, + PacketFlag.DO_NOT_LOG_PACKET, + json.dumps(self.control["stable"]), + ) + + +class Simulation: + def __init__(self, okon_client) -> None: + self._okon_client = okon_client + self.checkpoints = None # got from the server + + def reset(self) -> None: + self._okon_client.send(PacketType.RST_SIM) + + def sync(self) -> None: + self._okon_client.send(PacketType.SET_ORIEN, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.GET_SENS, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.SET_PID, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.SET_CONTROL_MODE, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.SET_STABLE, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.GET_DETE, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.GET_VIDEO_BYTES, PacketFlag.DO_NOT_LOG_PACKET) + self._okon_client.send(PacketType.GET_DEPTH_BYTES, PacketFlag.DO_NOT_LOG_PACKET) + + +def angle_difference(angle1: float, angle2: float) -> float: + diff = abs(((angle1 + 360) % 360) - ((angle2 + 360) % 360)) + return min(diff, 360 - diff) + + +def angle0360(angle: float) -> float: + return (angle % 360 + 360) % 360 + + +def angle180(angle: float) -> float: + return angle - 360 if angle > 180 else angle + + +def angle_norm(a: dict) -> dict: + x = angle180(angle0360(a["x"])) + y = angle180(angle0360(a["y"])) + z = angle180(angle0360(a["z"])) + if abs(x) > 90: + y = angle180(180 + y) + x = angle180(angle0360(180 - x)) + z = angle180(angle0360(180 + z)) + return {"x": x, "y": y, "z": z} + + +class OkonSimClient: + def __init__(self, ip, port, options=None, sync_interval=0.05, debug=True) -> None: + self.okon = OkonSim(self) + self.simulation = Simulation(self) + + self.ip = ip + self.port = port + self.debug = debug + self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + 
self.connected = False + self._events = dict() + self.syncTime = time.time() + self.sync_interval = sync_interval + self._to_send = Queue() + + def connect(self) -> bool: + if self.debug: + print(f"Connecting to {self.ip}:{self.port}") + try: + self.socket.connect((self.ip, self.port)) + self.connected = True + comm_thread = Thread(target=self._comm_thread) + sync_thread = Thread(target=self._sync_thread) + comm_thread.daemon = True + sync_thread.daemon = True + comm_thread.start() + sync_thread.start() + return True + except Exception as err: + print(f"failed to connect {err}") + return False + + def send(self, packet_type: int, packet_flag: int = PacketFlag.NONE, json: str = None) -> None: + if json == None: + self._to_send.put((packet_type, packet_flag, 0)) + else: + json_bytes = json.encode() + self._to_send.put((packet_type, packet_flag, len(json_bytes), json_bytes)) + + def _sync_thread(self) -> None: + while self.connected: + while not self._to_send.empty(): + packet = self._to_send.get() + self._send(packet) + + if time.time() > self.syncTime + self.sync_interval: + self.syncTime = time.time() + self.simulation.sync() + else: + time.sleep(0.001) # avg ping ~2.7ms + + def _send(self, packet: tuple) -> None: + self.socket.sendall(packet[0].to_bytes(1, byteorder="little")) + self.socket.sendall(packet[1].to_bytes(1, byteorder="little")) + self.socket.sendall(packet[2].to_bytes(4, byteorder="little")) + if packet[2] > 0: + self.socket.sendall(packet[3]) + if self.debug and packet[1] & PacketFlag.DO_NOT_LOG_PACKET == 0: + print(f'SENT {PacketType.get(packet[0])} {PacketFlag.get(packet[1])} dat:{packet[3].decode("utf-8") }') + else: + if self.debug and packet[1] & PacketFlag.DO_NOT_LOG_PACKET == 0: + print(f"SENT {PacketType.get(packet[0])} {PacketFlag.get(packet[1])}") + + def _receiveAll(self, n: int) -> bytes: + buffer = b"" + while len(buffer) != n: + buffer += self.socket.recv(n - len(buffer)) + return buffer + + def _comm_thread(self) -> None: + while 
self.connected: + packet_typeFlag = self._receiveAll(2) + dataLength = int.from_bytes(self._receiveAll(4), "little") + data_bytes = self._receiveAll(dataLength) + self._handle_packet(packet_typeFlag[0], packet_typeFlag[1], data_bytes) + + def _handle_packet(self, packet_type: int, packet_flag: int, data_bytes: bytes): + if self.debug and packet_flag & PacketFlag.DO_NOT_LOG_PACKET == 0: + print( + f'RECV {PacketType.get(packet_type)} {PacketFlag.get(packet_flag)} len:{len(data_bytes)} {data_bytes.decode("utf-8")[:40]}' + ) + if packet_type == PacketType.SET_MTR: + manual = json.loads(data_bytes.decode()) + self.okon.control["manual"] = manual + elif packet_type == PacketType.ARM_MTR: + pass + elif packet_type == PacketType.DISARM_MTR: + pass + elif packet_type == PacketType.SET_CONTROL_MODE: + self.okon.control["mode"] = data_bytes.decode() + elif packet_type == PacketType.SET_ACRO: + acro = json.loads(data_bytes.decode()) + self.okon.control["acro"] = acro + elif packet_type == PacketType.SET_STABLE: + stable = json.loads(data_bytes.decode()) + self.okon.control["stable"] = stable + elif packet_type == PacketType.SET_PID: + pids = json.loads(data_bytes.decode()) + self.okon.pids = pids + elif packet_type == PacketType.GET_SENS: + sens = json.loads(data_bytes.decode()) + sens["rot "] = angle_norm(sens["rot"]) + self.okon.sens["baro"] = sens["baro"]["pressure"] + self.okon.sens["imu"] = sens + elif packet_type == PacketType.GET_DEPTH: + pass + elif packet_type == PacketType.GET_DEPTH_BYTES: + self.okon.depth = data_bytes + elif packet_type == PacketType.GET_VIDEO_BYTES: + self.okon.video = data_bytes + elif packet_type == PacketType.GET_VIDEO: + pass + elif packet_type == PacketType.SET_SIM: + pass + elif packet_type == PacketType.ACK: + pass + elif packet_type == PacketType.SET_ORIEN: + orien = json.loads(data_bytes.decode()) + self.okon.orien["pos"] = orien["pos"] + self.okon.orien["rot"] = angle_norm(orien["rot"]) + elif packet_type == PacketType.RST_SIM: + 
self._emit_event("simRST") + elif packet_type == PacketType.PING: + self._emit_event("ping", data_bytes.decode()) + elif packet_type == PacketType.GET_CPS: + checkpoints = json.loads(data_bytes.decode()) + self.simulation.checkpoints = checkpoints + elif packet_type == PacketType.HIT_NGZ: + ngz = json.loads(data_bytes.decode()) + self._emit_event("hitNGZ", ngz["id"]) + elif packet_type == PacketType.HIT_FZ: + fz = json.loads(data_bytes.decode()) + self._emit_event("hitFZ", fz["id"]) + elif packet_type == PacketType.CHK_AP: + pass + elif packet_type == PacketType.ERROR: + error = json.loads(data_bytes.decode()) + self._emit_event("error", error) + elif packet_type == PacketType.REC_STRT: + pass + elif packet_type == PacketType.REC_ST: + pass + elif packet_type == PacketType.REC_RST: + pass + elif packet_type == PacketType.GET_REC: + pass + elif packet_type == PacketType.GET_DETE: + self.okon.sens["detection"] = json.loads(data_bytes.decode()) + else: + self._emit_event("packet", (packet_type, packet_flag, data_bytes)) + + def disconnect(self) -> None: + self.connected = False + self.socket.close() + self._emit_event("disconnect") + + def on_event(self, name: str, func) -> None: + if name in self._events: + self._events[name].append(func) + else: + self._events[name] = list() + self._events[name].append(func) + + def _emit_event(self, name: str, args=None) -> None: + if name in self._events: + for e in self._events[name]: + event_thread = Thread(target=e, args=(args,)) + event_thread.daemon = True + event_thread.start() diff --git a/main_GUI.py b/main_GUI.py index 79266c6..ab4a17f 100644 --- a/main_GUI.py +++ b/main_GUI.py @@ -16,6 +16,7 @@ # for GUI only from communicationThreads.GUI.Server import JetsonServer from communicationThreads.GUI.Setup import PrepareCallbacks +from communicationThreads.Simulation.okon_sim_client import OkonSimClient, PacketFlag, PacketType from config.ConfigLoader import ConfigLoader # controlThread @@ -24,8 +25,13 @@ if __name__ == 
"__main__": logging.basicConfig(level=logging.DEBUG) + # init sim_client + simulation_client = OkonSimClient(ip="127.0.0.1", port=44210, sync_interval=0.05, debug=False) + if not simulation_client.connect(): + print("Not connected!") + # init cameraStream - cameraStream = SimulationWAPIStreamClient() + cameraStream = SimulationWAPIStreamClient(simulation_client) cameraStream.setFov(60, 60) # init control thread controlThread = SimulationControlThread() @@ -39,10 +45,10 @@ autonomyThread = AutonomyThread(detector, controller) autonomyThread.StartAutonomy() - # load config and start camera + # # load config and start camera controlThread.setPIDs(ConfigLoader.LoadPIDs("config/PID_simulation.json")) cameraStream.start() - # detector.StartDetecting() + detector.StartDetecting() # lines only for gui mode = "simulation" From 53416bd98f8fb30d341bbd8f33b6785d2845f0f1 Mon Sep 17 00:00:00 2001 From: Patryk Niemiec Date: Tue, 13 Dec 2022 19:09:03 +0100 Subject: [PATCH 2/7] working streaming with uncomplete control --- cameraStream/SimulationWAPIStreamClient.py | 12 ++---- controlThread/controlThread_simulation.py | 48 ++++++++++++++-------- main_GUI.py | 13 +++--- 3 files changed, 44 insertions(+), 29 deletions(-) diff --git a/cameraStream/SimulationWAPIStreamClient.py b/cameraStream/SimulationWAPIStreamClient.py index acf0884..4ef5350 100644 --- a/cameraStream/SimulationWAPIStreamClient.py +++ b/cameraStream/SimulationWAPIStreamClient.py @@ -5,7 +5,7 @@ import cv2 import numpy as np -import communicationThreads.Simulation.simulationClient2 as sc +import communicationThreads.Simulation.simulationClient as sc import variable from cameraStream.stream import cameraStream @@ -28,17 +28,12 @@ def __init__(self, client): def run(self): while self.active: - self.receive_frame() + self.frame = self.client.okon.video # around 50 fps time.sleep(0.01) def getFrame(self): - return self.frame - - def receive_frame(self): - val = self.client.okon.video - if val is not None: - self.frame = 
val + return self.client.okon.video def setFov(self, h_fov, v_fov): self.h_fov = h_fov @@ -55,6 +50,7 @@ def getDepthMap(self): return self.client.get_depth_map() # jpg bytes def getPointCloud(self): + # TODO: fix d_map = self.client.get_depth_map() # jpg bytes nparray = np.frombuffer(img_bytes, np.uint8) img = cv2.imdecode(nparray, cv2.IMREAD_GRAYSCALE) diff --git a/controlThread/controlThread_simulation.py b/controlThread/controlThread_simulation.py index 9f4ae33..3b66431 100644 --- a/controlThread/controlThread_simulation.py +++ b/controlThread/controlThread_simulation.py @@ -6,56 +6,72 @@ class SimulationControlThread(ControlThread): - def __init__(self): + def __init__(self, client): ControlThread.__init__(self) - self.client = SimulationClient() - self.PIDThread = PIDThread(self.client, self.HandleNewData) + self.client = client + # self.PIDThread = PIDThread(self.client, self.HandleNewData) def HandleSteeringInput(self, data): - self.PIDThread.HandleSteeringInput(data) + print("Handling steering...") + # self.PIDThread.HandleSteeringInput(data) # common methods for mode 1 and 2 def disarm(self): super().disarm() - self.PIDThread.disarm() + print("Handling steering...") + self.client.okon.disarm_motors() + # self.PIDThread.disarm() def arm(self, mode=0): super().arm() - self.PIDThread.arm(mode) + self.client.okon.arm_motors() + # self.PIDThread.arm(mode) def setControlMode(self, mode): super().setControlMode(mode) print("mode changed to:" + str(mode)) - self.PIDThread.mode = mode - self.PIDThread.heading_setpoint = self.PIDThread.client.get_sample("yaw") - self.mode = mode + # self.PIDThread.mode = mode + # self.PIDThread.heading_setpoint = self.PIDThread.client.get_sample("yaw") + # self.mode = mode + self.mode = "stable" + self.client.okon.setMode("stable") def moveForward(self, value): - self.PIDThread.moveForward(value) + self.client.okon.set_stable_vel(z=value) + # self.PIDThread.moveForward(value) # mode 0 def setAngularVelocity(self, roll, pitch, yaw): 
super().setAngularVelocity(roll, pitch, yaw) - self.PIDThread.velocity_setpoints = [roll, pitch, yaw] + print("set angular velocity") + # TODO: implement angular velocity + # + # self.PIDThread.velocity_setpoints = [roll, pitch, yaw] # TODO zachowanie głębokości w tym trybie def vertical(self, arg): - self.PIDThread.vertical = arg + print("vertical") + # self.PIDThread.vertical = arg # mode 1 def setAttitude(self, roll, pitch, yaw): super().setAttitude(roll, pitch, yaw) - self.PIDThread.SetAttitude(roll, pitch, yaw) + self.client.okon.set_stable_rot(x=pitch, y=yaw, z=roll) + # self.PIDThread.SetAttitude(roll, pitch, yaw) def setDepth(self, depth): super().setDepth(depth) - self.PIDThread.SetDepth(depth) + self.client.okon.set_depth(depth) + # self.PIDThread.SetDepth(depth) def setPIDs(self, arg): - self.PIDThread.SetPIDs(arg) + print("set pid") + # self.PIDThread.SetPIDs(arg) def getPIDs(self): - return self.PIDThread.GetPIDs() + print("Get pids") + return None + # return self.PIDThread.GetPIDs() def HandleNewData(self, data): attitude, gyro, acc, mag, depth, angular_velocity, position, velocity, acceleration, motors = data diff --git a/main_GUI.py b/main_GUI.py index ab4a17f..08fea9f 100644 --- a/main_GUI.py +++ b/main_GUI.py @@ -34,21 +34,23 @@ cameraStream = SimulationWAPIStreamClient(simulation_client) cameraStream.setFov(60, 60) # init control thread - controlThread = SimulationControlThread() + controlThread = SimulationControlThread(simulation_client) # init autonomy helpers # we can switch them according to environment - detector = Simulation_noGPU_detector(cameraStream, controlThread) + # detector = Simulation_noGPU_detector(cameraStream, controlThread) + + detector = None controller = Controller(controlThread) # init autonomy autonomyThread = AutonomyThread(detector, controller) - autonomyThread.StartAutonomy() + # autonomyThread.StartAutonomy() # # load config and start camera controlThread.setPIDs(ConfigLoader.LoadPIDs("config/PID_simulation.json")) 
cameraStream.start() - detector.StartDetecting() + # detector.StartDetecting() # lines only for gui mode = "simulation" @@ -61,11 +63,12 @@ # after receiving a msg server invokes a callback # for sending telemetry server uses 'dataCollector' marked below as 'd' + # TODO: Setup callbacks to support new simulation c, d = PrepareCallbacks(detector, autonomyThread, controlThread) server.SetCallbacks(c, d) # event handling - make sure that arguments are matching - detector.RegisterDetectionCallback(server.sender.SendDetection) + # detector.RegisterDetectionCallback(server.sender.SendDetection) # start when ready server.StartServer() From ac55fd05b621842b4d61361580eeebe048477740 Mon Sep 17 00:00:00 2001 From: Patryk Niemiec Date: Sun, 18 Dec 2022 21:44:31 +0100 Subject: [PATCH 3/7] Working simulation detector and fully keyboard control --- autonomy/Detectors/simulation_noGPU.py | 17 ++++++++--------- communicationThreads/GUI/Sender.py | 2 +- .../Simulation/okon_sim_client.py | 12 ++++++++++-- controlThread/controlThread_simulation.py | 16 +++++++++++++--- main_GUI.py | 8 +++----- 5 files changed, 35 insertions(+), 20 deletions(-) diff --git a/autonomy/Detectors/simulation_noGPU.py b/autonomy/Detectors/simulation_noGPU.py index 2e73cf9..e5babae 100644 --- a/autonomy/Detectors/simulation_noGPU.py +++ b/autonomy/Detectors/simulation_noGPU.py @@ -9,26 +9,25 @@ class Simulation_noGPU_detector(base.DetectorBaseClass): cameraStream = None - def __init__(self, cameraStream, controlThread): + def __init__(self, cameraStream, controlThread, client): super().__init__() # control thread is used for obtaining position and attitude data self.controlThread = controlThread self.cameraStream = cameraStream - self.client = SimulationClient() + self.client = client if cameraStream == None: return None j = 0 def DetectorTask(self): - time.sleep(1) + time.sleep(1 / 30) LastDetections = list() detection = self.get_detection() - detection = detection["detected"] + # detection = 
detection["detected"] for i in detection: - if i["visibleInFrame"]: - o = self.handle_detection(i) - LastDetections.append(o.toDictionary()) + o = self.handle_detection(i) + LastDetections.append(o.toDictionary()) #################################### """ @@ -42,7 +41,7 @@ def DetectorTask(self): fdc.close() self.j +=1 """ - fps = 1 + fps = 30 self.InvokeCallback(fps, LastDetections) def handle_detection(self, detection): @@ -91,4 +90,4 @@ def handle_detection(self, detection): def get_detection(self): # camera stream is based on simulation web api. - return self.client.get_detection() + return self.client.okon.get_visible_detection() diff --git a/communicationThreads/GUI/Sender.py b/communicationThreads/GUI/Sender.py index aa54654..7b1cb37 100644 --- a/communicationThreads/GUI/Sender.py +++ b/communicationThreads/GUI/Sender.py @@ -88,7 +88,7 @@ def SendBattery(self): def SendDetection(self, fps, detectionList): key = bytes([Protocol.TO_GUI.AUTONOMY_MSG.DETECTION]) test = {"fps": fps, "ObjectsList": detectionList} - print(test) + # print(test) data = json.dumps({"fps": fps, "ObjectsList": detectionList}) data = data.encode() self.SendAutonomyMsg(data, key) diff --git a/communicationThreads/Simulation/okon_sim_client.py b/communicationThreads/Simulation/okon_sim_client.py index a6108cd..4624722 100644 --- a/communicationThreads/Simulation/okon_sim_client.py +++ b/communicationThreads/Simulation/okon_sim_client.py @@ -153,6 +153,14 @@ def get_detection(self, className: str): ) ) + def get_visible_detection(self): + return list( + filter( + lambda d: d["visibleInFrame"], + self.sens["detection"], + ) + ) + def set_stable_rot(self, x: float = None, y: float = None, z: float = None, add=False) -> None: if x is not None: if add: @@ -348,8 +356,8 @@ def _handle_packet(self, packet_type: int, packet_flag: int, data_bytes: bytes): checkpoints = json.loads(data_bytes.decode()) self.simulation.checkpoints = checkpoints elif packet_type == PacketType.HIT_NGZ: - ngz = 
json.loads(data_bytes.decode()) - self._emit_event("hitNGZ", ngz["id"]) + ngz = data_bytes.decode() + self._emit_event("hitNGZ", ngz) elif packet_type == PacketType.HIT_FZ: fz = json.loads(data_bytes.decode()) self._emit_event("hitFZ", fz["id"]) diff --git a/controlThread/controlThread_simulation.py b/controlThread/controlThread_simulation.py index 3b66431..3da4179 100644 --- a/controlThread/controlThread_simulation.py +++ b/controlThread/controlThread_simulation.py @@ -12,13 +12,20 @@ def __init__(self, client): # self.PIDThread = PIDThread(self.client, self.HandleNewData) def HandleSteeringInput(self, data): - print("Handling steering...") + if data[0] != 0 or data[1] != 0 or data[2] != 0: + rot = self.client.okon.sens["imu"]["rot"] + self.client.okon.set_stable_rot( + y=rot["y"] + data[2] / 100, z=rot["z"] - data[0] / 100, x=rot["x"] + data[1] / 100 + ) + if data[3] != 0: + self.client.okon.set_stable_vel(z=data[3] / 1000) + if data[4] != 0: + self.client.okon.set_depth(self.client.okon.sens["baro"] / 9800 - data[4] / 200) # self.PIDThread.HandleSteeringInput(data) # common methods for mode 1 and 2 def disarm(self): super().disarm() - print("Handling steering...") self.client.okon.disarm_motors() # self.PIDThread.disarm() @@ -70,12 +77,15 @@ def setPIDs(self, arg): def getPIDs(self): print("Get pids") - return None + return [0] * 16 # return self.PIDThread.GetPIDs() def HandleNewData(self, data): + attitude, gyro, acc, mag, depth, angular_velocity, position, velocity, acceleration, motors = data val = {} + test_string = "foo" + eval(test_string) for i in self.keys: val[i] = eval(i) self.NewDataCallback.Invoke(val) diff --git a/main_GUI.py b/main_GUI.py index 08fea9f..2625633 100644 --- a/main_GUI.py +++ b/main_GUI.py @@ -38,9 +38,7 @@ # init autonomy helpers # we can switch them according to environment - # detector = Simulation_noGPU_detector(cameraStream, controlThread) - - detector = None + detector = Simulation_noGPU_detector(cameraStream, controlThread, 
simulation_client) controller = Controller(controlThread) # init autonomy @@ -50,7 +48,7 @@ # # load config and start camera controlThread.setPIDs(ConfigLoader.LoadPIDs("config/PID_simulation.json")) cameraStream.start() - # detector.StartDetecting() + detector.StartDetecting() # lines only for gui mode = "simulation" @@ -68,7 +66,7 @@ server.SetCallbacks(c, d) # event handling - make sure that arguments are matching - # detector.RegisterDetectionCallback(server.sender.SendDetection) + detector.RegisterDetectionCallback(server.sender.SendDetection) # start when ready server.StartServer() From c0489ef03d3d8977e0b12175d4ab835ca2a3bd9e Mon Sep 17 00:00:00 2001 From: Patryk Niemiec Date: Mon, 19 Dec 2022 22:53:37 +0100 Subject: [PATCH 4/7] Add verbosity to isort and --diff to black --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d15767f..b8e967e 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -24,11 +24,11 @@ jobs: - name: python-isort uses: isort/isort-action@v1.1.0 with: - configuration: "--check-only --diff --line-length 120 --profile black" + configuration: "--check-only --diff --verbose --line-length 120 --profile black" - name: Black formatter uses: psf/black@stable with: - options: "--check --verbose --line-length 120" + options: "--check --diff --verbose --line-length 120" # TODO: Support linting with flake8 and maybe testing? 
From f9ac23bbc9d2a290764ea814df879ecbdda27bd8 Mon Sep 17 00:00:00 2001 From: niemiaszek Date: Sun, 17 Jul 2022 07:59:34 +0200 Subject: [PATCH 5/7] Add initial ZED camera streaming --- cameraStream/ZEDStreamClient.py | 105 ++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 cameraStream/ZEDStreamClient.py diff --git a/cameraStream/ZEDStreamClient.py b/cameraStream/ZEDStreamClient.py new file mode 100644 index 0000000..92bb2c1 --- /dev/null +++ b/cameraStream/ZEDStreamClient.py @@ -0,0 +1,105 @@ +"""ZED Camera streaming +""" +import io +import math +import threading +import time + +import cv2 +import numpy as np +import pyzed.sl as sl +from PIL import Image +from typing_extensions import Self + +import communicationThreads.Simulation.simulationClient as sc +import variable +from cameraStream.stream import cameraStream + + +class ZEDSenderThread: + """Local streaming of ZED processed data""" + + def __init__(self) -> None: + threading.Thread.__init__(self) + self.active = True + + def run(self): + init = sl.InitParameters() + init.camera_resolution = sl.RESOLUTION.HD2K + init.depth_mode = sl.DEPTH_MODE.ULTRA + cam = sl.Camera() + + status = cam.open(init) + if status != sl.ERROR_CODE.SUCCESS: + print(repr(status)) + exit(1) + + runtime = sl.RuntimeParameters() + + stream = sl.StreamingParameters() + stream.codec = sl.STREAMING_CODEC.H265 + stream.bitrate = 7000 + status = cam.enable_streaming(stream) + if status != sl.ERROR_CODE.SUCCESS: + print(repr(status)) + exit(1) + + while self.active: + cam.grab(runtime) + + cam.disable_streaming() + cam.close() + + +class ZEDStreamClient(cameraStream): + """Stream client using ZED camera""" + + def __init__(self): + threading.Thread.__init__(self) + self.frame = None + self.depth_map = None + self.point_cloud = None + + def run(self): + init = sl.InitParameters() + init.camera_resolution = sl.RESOLUTION.HD2K + init.depth_mode = sl.DEPTH_MODE.ULTRA + cam = sl.Camera() + status = cam.open(init) 
+ if status != sl.ERROR_CODE.SUCCESS: + print(repr(status)) + exit(1) + + runtime = sl.RuntimeParameters() + img = sl.Mat() + depth_map = sl.Mat() + point_cloud = sl.Mat() + + while self.active: + err = cam.grab(runtime) + if err == sl.ERROR_CODE.SUCCESS: + cam.retrieve_image(img, sl.VIEW.LEFT) + cam.retrieve_measure(depth_map, sl.MEASURE.DEPTH) + cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA) + + # save numpys ? + self.frame = img.get_data() + self.depth_map = depth_map.get_data() + self.point_cloud = point_cloud.get_data() + + def getFrame(self): + img = Image.fromarray(self.frame) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format="JPEG") + + return img_byte_arr.getvalue() + + def get_depth_map(self): + depth = Image.fromarray(self.depth_map) + depth_byte_arr = io.BytesIO() + depth.save(depth_byte_arr, format="JPEG") + + return depth_byte_arr.getvalue() + + def getPointCloud(self): + return self.point_cloud From 971f7f6b15f3fe3291c763a04669a71dc6ab28ae Mon Sep 17 00:00:00 2001 From: niemiaszek Date: Sun, 17 Jul 2022 08:13:51 +0200 Subject: [PATCH 6/7] Cleanup, some typing added --- cameraStream/ZEDStreamClient.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/cameraStream/ZEDStreamClient.py b/cameraStream/ZEDStreamClient.py index 92bb2c1..11cdd5c 100644 --- a/cameraStream/ZEDStreamClient.py +++ b/cameraStream/ZEDStreamClient.py @@ -1,18 +1,12 @@ -"""ZED Camera streaming +"""ZED Camera local streaming """ import io -import math import threading -import time -import cv2 import numpy as np import pyzed.sl as sl from PIL import Image -from typing_extensions import Self -import communicationThreads.Simulation.simulationClient as sc -import variable from cameraStream.stream import cameraStream @@ -71,6 +65,7 @@ def run(self): exit(1) runtime = sl.RuntimeParameters() + img = sl.Mat() depth_map = sl.Mat() point_cloud = sl.Mat() @@ -82,24 +77,27 @@ def run(self): cam.retrieve_measure(depth_map, sl.MEASURE.DEPTH) 
cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA) - # save numpys ? + # save numpys self.frame = img.get_data() self.depth_map = depth_map.get_data() self.point_cloud = point_cloud.get_data() - def getFrame(self): + def getFrame(self) -> bytes: + """Prepares byte representation of jpeg image""" img = Image.fromarray(self.frame) img_byte_arr = io.BytesIO() img.save(img_byte_arr, format="JPEG") return img_byte_arr.getvalue() - def get_depth_map(self): + def getDepthMap(self) -> bytes: + """Prepare byte representation of jpeg depth map""" depth = Image.fromarray(self.depth_map) depth_byte_arr = io.BytesIO() depth.save(depth_byte_arr, format="JPEG") return depth_byte_arr.getvalue() - def getPointCloud(self): + def getPointCloud(self) -> np.ndarray: + """Numpy array representing cloud of points with XYZ""" return self.point_cloud From e23cf9c4b2b8c0b51ddaee52600c241c2afce0f9 Mon Sep 17 00:00:00 2001 From: niemiaszek Date: Tue, 2 Aug 2022 02:33:44 +0200 Subject: [PATCH 7/7] Change local streaming to composition of camera thread inside camera streamer --- cameraStream/ZEDStream.py | 54 +++++++++++++++++ cameraStream/ZEDStreamClient.py | 103 -------------------------------- zed/ZEDCamera.py | 71 ++++++++++++++++++++++ 3 files changed, 125 insertions(+), 103 deletions(-) create mode 100644 cameraStream/ZEDStream.py delete mode 100644 cameraStream/ZEDStreamClient.py create mode 100644 zed/ZEDCamera.py diff --git a/cameraStream/ZEDStream.py b/cameraStream/ZEDStream.py new file mode 100644 index 0000000..7bd2687 --- /dev/null +++ b/cameraStream/ZEDStream.py @@ -0,0 +1,54 @@ +"""ZED Camera local streaming +""" +import io +import threading + +import numpy as np +import pyzed.sl as sl +from PIL import Image + +from cameraStream.stream import cameraStream +from zed.ZEDCamera import ZEDCamera + + +class ZEDStream(cameraStream): + """Streamer using ZED camera. + + Produces byte representations of JPEG images/depth + from camera copies, which can be further streamed to GUI. 
+ """ + + def __init__(self, camera: ZEDCamera, lock: threading.Lock): + threading.Thread.__init__(self) + self.camera = camera + self.frame = None + self.depth_map = None + self.point_cloud = None + self.lock = lock + + def run(self): + while self.active: + with self.lock: + self.frame = self.camera.frame + self.depth_map = self.camera.depth_map + self.point_cloud = self.camera.point_cloud + + def getFrame(self) -> bytes: + """Prepares byte representation of jpeg image""" + img = Image.fromarray(self.frame) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format="JPEG") + + return img_byte_arr.getvalue() + + def getDepthMap(self) -> bytes: + """Prepare byte representation of jpeg depth map""" + depth = Image.fromarray(self.depth_map) + depth_byte_arr = io.BytesIO() + depth.save(depth_byte_arr, format="JPEG") + + return depth_byte_arr.getvalue() + + def getPointCloud(self) -> np.ndarray: + """Numpy array representing cloud of points with XYZ""" + return self.point_cloud diff --git a/cameraStream/ZEDStreamClient.py b/cameraStream/ZEDStreamClient.py deleted file mode 100644 index 11cdd5c..0000000 --- a/cameraStream/ZEDStreamClient.py +++ /dev/null @@ -1,103 +0,0 @@ -"""ZED Camera local streaming -""" -import io -import threading - -import numpy as np -import pyzed.sl as sl -from PIL import Image - -from cameraStream.stream import cameraStream - - -class ZEDSenderThread: - """Local streaming of ZED processed data""" - - def __init__(self) -> None: - threading.Thread.__init__(self) - self.active = True - - def run(self): - init = sl.InitParameters() - init.camera_resolution = sl.RESOLUTION.HD2K - init.depth_mode = sl.DEPTH_MODE.ULTRA - cam = sl.Camera() - - status = cam.open(init) - if status != sl.ERROR_CODE.SUCCESS: - print(repr(status)) - exit(1) - - runtime = sl.RuntimeParameters() - - stream = sl.StreamingParameters() - stream.codec = sl.STREAMING_CODEC.H265 - stream.bitrate = 7000 - status = cam.enable_streaming(stream) - if status != sl.ERROR_CODE.SUCCESS: 
- print(repr(status)) - exit(1) - - while self.active: - cam.grab(runtime) - - cam.disable_streaming() - cam.close() - - -class ZEDStreamClient(cameraStream): - """Stream client using ZED camera""" - - def __init__(self): - threading.Thread.__init__(self) - self.frame = None - self.depth_map = None - self.point_cloud = None - - def run(self): - init = sl.InitParameters() - init.camera_resolution = sl.RESOLUTION.HD2K - init.depth_mode = sl.DEPTH_MODE.ULTRA - cam = sl.Camera() - status = cam.open(init) - if status != sl.ERROR_CODE.SUCCESS: - print(repr(status)) - exit(1) - - runtime = sl.RuntimeParameters() - - img = sl.Mat() - depth_map = sl.Mat() - point_cloud = sl.Mat() - - while self.active: - err = cam.grab(runtime) - if err == sl.ERROR_CODE.SUCCESS: - cam.retrieve_image(img, sl.VIEW.LEFT) - cam.retrieve_measure(depth_map, sl.MEASURE.DEPTH) - cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA) - - # save numpys - self.frame = img.get_data() - self.depth_map = depth_map.get_data() - self.point_cloud = point_cloud.get_data() - - def getFrame(self) -> bytes: - """Prepares byte representation of jpeg image""" - img = Image.fromarray(self.frame) - img_byte_arr = io.BytesIO() - img.save(img_byte_arr, format="JPEG") - - return img_byte_arr.getvalue() - - def getDepthMap(self) -> bytes: - """Prepare byte representation of jpeg depth map""" - depth = Image.fromarray(self.depth_map) - depth_byte_arr = io.BytesIO() - depth.save(depth_byte_arr, format="JPEG") - - return depth_byte_arr.getvalue() - - def getPointCloud(self) -> np.ndarray: - """Numpy array representing cloud of points with XYZ""" - return self.point_cloud diff --git a/zed/ZEDCamera.py b/zed/ZEDCamera.py new file mode 100644 index 0000000..f591ca4 --- /dev/null +++ b/zed/ZEDCamera.py @@ -0,0 +1,71 @@ +"""Camera thread for images/depth/POC processing""" +import io +import socket +import string +import struct +import sys +import threading +from time import sleep +from typing import Tuple + +import cv2 +import 
numpy as np
+import pyzed.sl as sl
+from PIL import Image
+
+
+class ZEDCamera(threading.Thread):
+    def __init__(self, image_size: Tuple[int, int] = None, lock=threading.Lock) -> None:
+        threading.Thread.__init__(self)
+        self.active = True
+        self.frame = None
+        self.depth_map = None
+        self.point_cloud = None
+        self.image_size = image_size
+        self.lock = lock
+
+    def stop(self):
+        """Stop the main function causing the camera to close"""
+        with self.lock:
+            self.active = False
+
+    def run(self):
+        """Main function executed in thread after start() method"""
+        init = sl.InitParameters()
+        init.camera_resolution = sl.RESOLUTION.HD2K
+        init.depth_mode = sl.DEPTH_MODE.ULTRA
+        cam = sl.Camera()
+
+        status = cam.open(init)
+        if status != sl.ERROR_CODE.SUCCESS:
+            print(repr(status))
+            sys.exit(1)
+
+        runtime = sl.RuntimeParameters()
+        # Prepare data containers
+        image_size = cam.get_camera_information().camera_resolution
+        if self.image_size:
+            image_size.width = self.image_size[0]
+            image_size.height = self.image_size[1]
+
+        img = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
+        depth_map = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
+        point_cloud = sl.Mat()
+
+        while self.active:
+            err = cam.grab(runtime)
+            # Retrieve data from camera frame
+            if err == sl.ERROR_CODE.SUCCESS:
+                cam.retrieve_image(img, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
+                cam.retrieve_measure(depth_map, sl.MEASURE.DEPTH, sl.MEM.CPU, image_size)
+                cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
+
+                with self.lock:
+                    # save numpys for processing in other classes
+                    self.frame = img.get_data()
+                    self.depth_map = depth_map.get_data()
+                    self.point_cloud = point_cloud.get_data()
+
+            sleep(0.01)
+
+        cam.close()