import asyncio
import os
import time
import traceback
import warnings
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import cast, List

import aiohttp
import cv2
import numpy as np
import telegram
from aiohttp import BasicAuth
from cv2 import aruco
from pytapo import Tapo
from telegram import Update, Message
from telegram.ext import Updater
from urllib3.exceptions import InsecureRequestWarning

warnings.filterwarnings("ignore", category=InsecureRequestWarning)


class Bot:
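    """Telegram bot that drives a Tapo camera and an openHAB light item and
    posts (optionally perspective-corrected) photos to a single chat."""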

    def __init__(self):
        self.token = os.environ["BOT_TOKEN"]
        self.camera_ip = os.environ["CAMERA_IP"]
        self.camera_user = os.environ["CAMERA_USER"]
        self.camera_password = os.environ["CAMERA_PASSWORD"]
        self.chat_id = int(os.environ["CHAT_ID"])
        self.profile_name = os.environ.get("CAMERA_PROFILE_NAME", "board")
        self.openhab_url = os.environ["OPENHAB_URL"]
        self.openhab_token = os.environ["OPENHAB_TOKEN"]
        self.openhab_item = os.environ["OPENHAB_ITEM"]

        self.bot = telegram.Bot(token=self.token)
        self.me = None

        self.tapo = Tapo(self.camera_ip, self.camera_user, self.camera_password)
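
        # pytapo and OpenCV calls block, so they are dispatched to this pool
        # from the asyncio event loop (see take_photo).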
        self.executor = ThreadPoolExecutor(max_workers=len(os.sched_getaffinity(0)))

        self.last_aruco_corners = {}
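
    # pytapo's getPresets() maps preset IDs to names; invert it so a preset
    # can be looked up by the configured profile name.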
    def _get_presets(self):
        presets = self.tapo.getPresets()
        return {v: k for k, v in presets.items()}

    async def _get_item_state(self):
        url = f"{self.openhab_url}/rest/items/{self.openhab_item}/state"
        # use aiohttp instead of requests to avoid blocking
        async with aiohttp.ClientSession() as session:
            async with session.get(url, auth=BasicAuth(self.openhab_token, "")) as resp:
                return await resp.text()

    async def _send_item_command(self, command):
        print(f"Sending command {command}")
        url = f"{self.openhab_url}/rest/items/{self.openhab_item}"
        async with aiohttp.ClientSession() as session:
            async with session.post(
                url,
                auth=BasicAuth(self.openhab_token, ""),
                data=command,
                headers={"Content-Type": "text/plain", "Accept": "*/*"},
            ) as resp:
                return await resp.text()

    def _take_photo_blocking(
        self,
        adjust_perspective=True,
        timeout: float = 5.0,
    ) -> List[cv2.typing.MatLike]:
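        # Remember the current privacy mode so it can be restored once the
        # capture is finished.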
        privacy_mode = self.tapo.getPrivacyMode()

        # Prepare the camera
        print("Disabling privacy mode and setting auto day/night mode...")
        self.tapo.setPrivacyMode(False)
        self.tapo.setDayNightMode("auto")
        time.sleep(1)

        # Take the color image
        vcap = cv2.VideoCapture(
            f"rtsp://{self.camera_user}:{self.camera_password}@{self.camera_ip}:554/stream1"
        )
        print("Taking color image...")
        ret, pretty_image = vcap.read()
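        # The first reads may return nothing before the RTSP stream delivers a
        # frame, so retry until one arrives.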
        while pretty_image is None:
            ret, pretty_image = vcap.read()

        if not adjust_perspective:
            self.tapo.setPrivacyMode(privacy_mode)
            return [pretty_image]

        self.tapo.setDayNightMode("on")

        # Iterate until we find all 4 aruco markers or timeout
        aruco_corners = {}
        annotated_image = None
        aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
        aruco_params = aruco.DetectorParameters()

        t0 = time.time()
        print("Taking image with ArUco markers...")
        while len(aruco_corners) < 4:
            if time.time() - t0 > timeout:
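                # On timeout, fall back to corners cached from the previous
                # successful capture so a single missed marker does not abort
                # the perspective correction.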
                merged = self.last_aruco_corners.copy()
                merged.update(aruco_corners)
                aruco_corners = merged

                if len(aruco_corners) == 4:
                    print("Timeout waiting for ArUco markers, using cached corners")
                    break

                print(
                    "Timeout waiting for ArUco markers, returning only original image"
                )
                self.tapo.setPrivacyMode(privacy_mode)
                self.tapo.setDayNightMode("auto")
                return [pretty_image]

            ret, annotated_image = vcap.read()
            if not ret:
                continue

            # Detect the markers
            corners, ids, rejected = aruco.detectMarkers(
                annotated_image, aruco_dict, parameters=aruco_params
            )
            if corners is not None and ids is not None:
                for corner, i in zip(corners, ids):
                    aruco_corners[i[0]] = corner

        assert annotated_image is not None

        del vcap

        print(
            "Found all ArUco markers, restoring privacy mode and auto day/night mode..."
        )
        self.tapo.setDayNightMode("auto")
        self.tapo.setPrivacyMode(privacy_mode)
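
        # Marker IDs 1-4 are assumed to sit at (roughly) the bottom-left,
        # top-left, top-right and bottom-center of the board, matching the
        # corner indices picked out below.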
        corners = [aruco_corners[i] for i in range(1, 5)]
        ids = np.array([[i] for i in range(1, 5)])
        aruco.drawDetectedMarkers(annotated_image, corners, ids)

        # Annotate the image with the detected markers and apply the
        # perspective transform to the pretty image

        # Get the outermost points of each marker
        bl_marker = corners[0].squeeze()[3]  # bottom left marker
        tl_marker = corners[1].squeeze()[0]  # top left marker
        tr_marker = corners[2].squeeze()[1]  # top right marker
        bc_marker = corners[3].squeeze()[3]  # bottom center marker

        # Calculate the fourth point by computing the line through the bottom
        # markers and intersecting it with the vertical line through the top
        # right marker
        slope = (bc_marker[1] - bl_marker[1]) / (bc_marker[0] - bl_marker[0])
        y_intersection = slope * (tr_marker[0] - bc_marker[0]) + bc_marker[1]
        fourth_point = [tr_marker[0], y_intersection]

        rectangle_points = np.array(
            [tl_marker, bl_marker, fourth_point, tr_marker], dtype="float32"
        )

        # Expand the rectangle slightly (each corner is moved 2.5% further
        # away from the centroid)
        centroid = np.mean(rectangle_points, axis=0)
        expanded_rectangle_points = (
            rectangle_points + (rectangle_points - centroid) * 0.025
        )

        # Draw the expanded rectangle on the annotated image
        cv2.polylines(
            annotated_image,
            [np.int32(expanded_rectangle_points)],
            isClosed=True,
            color=(0, 255, 0),
            thickness=3,
        )

        # Define destination points for perspective transform, maintaining a 3:2 aspect ratio
        width, height = 300 * 5, 200 * 5
        dst_pts = np.array(
            [[0, 0], [0, height], [width, height], [width, 0]], dtype="float32"
        )

        matrix = cv2.getPerspectiveTransform(expanded_rectangle_points, dst_pts)
        warped = cv2.warpPerspective(pretty_image, matrix, (width, height))

        self.last_aruco_corners = aruco_corners

        return [warped, annotated_image]

    async def _photo_command(self, chat_id: int, adjust_perspective: bool = True):
        await self.bot.send_chat_action(chat_id=chat_id, action="upload_photo")

        photos = await self.take_photo(adjust_perspective=adjust_perspective)
        jpegs = [cv2.imencode(".jpg", photo)[1].tobytes() for photo in photos]

        media = [
            telegram.InputMediaPhoto(
                media=telegram.InputFile(jpeg, filename=f"photo{i}.jpg", attach=True),
                filename=f"photo{i}.jpg",
            )
            for i, jpeg in enumerate(jpegs)
        ]

        await self.bot.send_media_group(
            chat_id=chat_id,
            media=media,
            caption=str(datetime.now().strftime("%A, %B %d, %Y %H:%M:%S")),
        )
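
    # Supported commands: /start, /reposition, /calibrate, /light_on,
    # /light_off, /light_status, /photo, /photo_unprocessed, /privacy_on,
    # /privacy_off and /privacy_status.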
    async def parse_message(self, msg: Message):
        match msg.text:
            case "/start":
                await self.bot.send_message(
                    chat_id=msg.chat_id,
                    text="Hello, I'm a bot that can send you a photo from the camera.",
                )
case "/reposition":
|
2023-09-17 15:54:18 +00:00
|
|
|
privacy_mode = self.tapo.getPrivacyMode()
|
|
|
|
self.tapo.setPrivacyMode(False)
|
2023-09-14 22:11:13 +00:00
|
|
|
presets = self._get_presets()
|
|
|
|
if self.profile_name not in presets:
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Profile '{self.profile_name}' not found",
|
|
|
|
)
|
|
|
|
return
|
|
|
|
self.tapo.setPreset(presets[self.profile_name])
|
2023-09-17 16:04:35 +00:00
|
|
|
message = await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Repositioning to profile '{self.profile_name}'...",
|
|
|
|
)
|
|
|
|
await asyncio.sleep(5)
|
2023-09-17 15:54:18 +00:00
|
|
|
self.tapo.setPrivacyMode(privacy_mode)
|
2023-09-17 16:04:35 +00:00
|
|
|
await self.bot.edit_message_text(
|
2023-09-14 22:11:13 +00:00
|
|
|
chat_id=msg.chat_id,
|
2023-09-17 16:04:35 +00:00
|
|
|
message_id=message.message_id,
|
|
|
|
text=f"Repositioning complete",
|
2023-09-14 22:11:13 +00:00
|
|
|
)
|
|
|
|
case "/calibrate":
|
2023-09-17 15:54:18 +00:00
|
|
|
privacy_mode = self.tapo.getPrivacyMode()
|
2023-09-17 16:04:35 +00:00
|
|
|
self.tapo.setPrivacyMode(False)
|
|
|
|
await asyncio.sleep(0.3)
|
2023-09-14 22:11:13 +00:00
|
|
|
self.tapo.calibrateMotor()
|
2023-09-17 16:04:35 +00:00
|
|
|
message = await self.bot.send_message(
|
2023-09-14 22:11:13 +00:00
|
|
|
chat_id=msg.chat_id,
|
2023-09-17 16:04:35 +00:00
|
|
|
text=f"Calibrating, this will take ~25s...",
|
2023-09-14 22:11:13 +00:00
|
|
|
)
|
2023-09-17 16:04:35 +00:00
|
|
|
await asyncio.sleep(26)
|
|
|
|
await self.bot.edit_message_text(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
message_id=message.message_id,
|
|
|
|
text=f"Calibration complete",
|
|
|
|
)
|
|
|
|
self.tapo.setPrivacyMode(privacy_mode)
|
2023-09-14 22:11:13 +00:00
|
|
|
case "/light_on":
|
|
|
|
await self._send_item_command("ON")
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Light turned on",
|
|
|
|
)
|
|
|
|
case "/light_off":
|
|
|
|
await self._send_item_command("OFF")
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Light turned off",
|
|
|
|
)
|
|
|
|
case "/light_status":
|
|
|
|
state = await self._get_item_state()
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Light is {state}",
|
|
|
|
)
|
|
|
|
case "/photo":
|
2023-09-17 15:35:07 +00:00
|
|
|
await self._photo_command(msg.chat_id, adjust_perspective=True)
|
|
|
|
case "/photo_unprocessed":
|
|
|
|
await self._photo_command(msg.chat_id, adjust_perspective=False)
|
2023-09-17 15:54:18 +00:00
|
|
|
case "/privacy_on":
|
|
|
|
self.tapo.setPrivacyMode(True)
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Privacy mode turned on",
|
|
|
|
)
|
|
|
|
case "/privacy_off":
|
|
|
|
self.tapo.setPrivacyMode(False)
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Privacy mode turned off",
|
|
|
|
)
|
|
|
|
case "/privacy_status":
|
|
|
|
state = self.tapo.getPrivacyMode()
|
|
|
|
await self.bot.send_message(
|
|
|
|
chat_id=msg.chat_id,
|
|
|
|
text=f"Privacy mode is {state and 'enabled' or 'disabled'}",
|
|
|
|
)
|
2023-09-14 22:11:13 +00:00
|
|
|
|

    async def take_photo(
        self, adjust_perspective=True, timeout=5.0
    ) -> List[cv2.typing.MatLike]:
        item_state = await self._get_item_state()
        if item_state == "OFF":
            print("Turning light on")
            await self._send_item_command("ON")

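        # The capture itself is blocking (RTSP + pytapo), so run it on the
        # thread pool; the light is restored to its previous state either way.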
        try:
            return await asyncio.get_running_loop().run_in_executor(
                self.executor, self._take_photo_blocking, adjust_perspective, timeout
            )
        finally:
            if item_state == "OFF":
                print("Turning light back off")
                await self._send_item_command("OFF")

    async def run(self):
        async with self.bot:
            self.me = await self.bot.get_me()

            updater = Updater(bot=self.bot, update_queue=asyncio.Queue())
            async with updater:
                queue = await updater.start_polling(allowed_updates=[Update.MESSAGE])
                print("Bot is up and running")

                while True:
                    # noinspection PyBroadException
                    try:
                        upd = cast(Update, await queue.get())
                        print(upd)
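                        # Only messages from the configured chat are handled.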
                        if not upd.message or upd.message.chat_id != self.chat_id:
                            print("Ignoring message")
                            continue
                        # noinspection PyBroadException
                        try:
                            await self.parse_message(upd.message)
                        except Exception:
                            traceback.print_exc()
                            exc = traceback.format_exc()
                            await self.bot.send_message(
                                chat_id=self.chat_id,
                                text=f"Error: {exc}",
                            )
                    except Exception:
                        traceback.print_exc()


if __name__ == "__main__":
    bot = Bot()
    asyncio.run(bot.run())