import os
import socket
import cv2
import argparse
import platform
import sys
import threading
import queue
import datetime
import time
import numpy as np
from collections import Counter
from tflite_runtime.interpreter import Interpreter
import configparser
from PIL import Image, ImageDraw, ImageOps

WINDOW_NAME = 'test_monitor'
RECORDING_TIME = 10      # length of each recorded segment, in seconds
CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720

# Initial setup: fullscreen preview window pinned to the top-left corner.
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.moveWindow(WINDOW_NAME, 0, 0)

# Open the camera through the V4L2 backend and request the target resolution.
camera_id = 2
camera = cv2.VideoCapture(camera_id, cv2.CAP_V4L2)
if not camera.isOpened():
    # Fail fast instead of looping on read() failures later.
    print(f'cannot open camera {camera_id}')
    sys.exit(1)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)

fps = int(camera.get(cv2.CAP_PROP_FPS))        # camera FPS as reported by the driver
w = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))  # actual frame width granted
h = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)) # actual frame height granted
if fps <= 0:
    # Many V4L2 drivers report 0 for CAP_PROP_FPS; a zero here would produce an
    # invalid "framerate=0/1" caps string and a broken VideoWriter. Fall back
    # to a sane default.
    fps = 30

startflg = False   # True while a segment is currently being recorded
video = None       # active cv2.VideoWriter for the current segment, if any

try:
    while True:
        ############## Recording ##############
        ret, current_image = camera.read()
        if not ret:
            print('video end.')
            break

        # Start a new segment when none is in progress.
        if not startflg:
            recording_time = RECORDING_TIME  # segment length (seconds)
            now = datetime.datetime.now()
            movname = now.strftime('%Y%m%d%H%M%S') + '.mp4'  # timestamped output file
            # Hardware-encoded GStreamer pipeline (vpuenc_h264 = i.MX VPU encoder).
            pipeline = f"appsrc ! autovideoconvert ! video/x-raw,width={w},height={h},framerate={fps}/1 ! queue ! vpuenc_h264 ! h264parse ! qtmux ! filesink location={movname} sync=false"
            # Use the apiPreference overload with CAP_GSTREAMER so OpenCV
            # interprets the string as a pipeline, not a filename (fourcc=0).
            video = cv2.VideoWriter(pipeline, cv2.CAP_GSTREAMER, 0, fps, (w, h))
            recordinglogs_start = cv2.getTickCount()  # segment start tick
            startflg = True

        # Append the current frame; roll over to a new file after recording_time.
        if startflg:
            video.write(current_image)
            elapsed_time = (cv2.getTickCount() - recordinglogs_start) / cv2.getTickFrequency()
            if elapsed_time >= recording_time:
                startflg = False
                video.release()  # finalize the mp4 (qtmux needs a clean EOS)
                video = None

        # Live preview; press 'q' to stop cleanly.
        cv2.imshow(WINDOW_NAME, current_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Always finalize a partially written segment and free the camera/window,
    # otherwise the last mp4 is left truncated and the device stays busy.
    if video is not None:
        video.release()
    camera.release()
    cv2.destroyAllWindows()