# NOTE: repository-viewer residue (topic-limit notice, "817 lines", "32 KiB")
# was scraped into this file; kept only as a comment so the module stays valid.
import copy
import os
import glob
import threading
from dataclasses import asdict
from datetime import datetime
import json
import queue
import time
from time import sleep
from typing import Tuple
import cv2
import numpy as np
import logging
import configSer
import models.target
import models.sampleMsg
import upload.DataReporter
import utils
import videoPush
from stabilize.algorithm.adaptiveSmoothingWindow import AdaptiveSmoothingWindow
from models.msg import Msg
logging.basicConfig(level=logging.DEBUG)
drawing: bool = False  # True while the operator is drag-drawing a selection rectangle
is_video_mode: bool = False  # True: overlay the processed target sub-image onto the frame
# Endpoints of the in-progress mouse selection (set by add_rectangle)
start_point: models.target.Point
end_point: models.target.Point
# Global configuration object (assigned elsewhere before use)
configObj: configSer.ConfigOperate
# Uploader draining the data/image/alert queues (assigned elsewhere before use)
reporter: upload.DataReporter.DataReporter
# 鼠标回调函数
# Mouse callback: drag with the left button to define a new circle-target area.
def add_rectangle(event, x, y, flags, param):
    global start_point, end_point, drawing
    if event == cv2.EVENT_LBUTTONDOWN:  # left button pressed
        logging.info("左键按下")
        start_point = models.target.Point(x, y)
        end_point = start_point
        drawing = True
    elif event == cv2.EVENT_MOUSEMOVE:  # mouse moved
        if drawing:
            end_point = models.target.Point(x, y)
    elif event == cv2.EVENT_LBUTTONUP:  # left button released
        logging.info("左键抬起")
        drawing = False
        end_point = models.target.Point(x, y)
        if start_point == end_point:
            return
        # Reject accidental clicks: diagonal shorter than 20 px is invalid
        distance = cv2.norm(tuple(start_point), tuple(end_point), cv2.NORM_L2)
        if distance < 20:
            logging.info("距离小于20,无效区域")
            return
        # NOTE(review): id is the current dict size — could collide with an
        # existing id after targets are deleted and re-added; confirm.
        target_id = len(configObj.config_info.targets)
        # circle target radius in mm
        radius = 40.0
        area = models.target.RectangleArea(int(start_point.x), int(start_point.y),
                                           int(end_point.x - start_point.x), int(end_point.y - start_point.y))
        num = len(configObj.config_info.targets.items())
        t_info = models.target.TargetInfo(target_id,
                                          f"bb_{num}",
                                          area,
                                          radius,
                                          models.target.Threshold(120, 1),
                                          False)
        new_target = models.target.CircleTarget(t_info, None, None)
        logging.info(f"新增区域[{target_id}] => {start_point, end_point}")
        configObj.config_info.targets[target_id] = new_target
def read_target_rectangle():
    """Return the currently configured targets dict (id -> CircleTarget)."""
    return configObj.config_info.targets
class VideoProcessor:
    """Captures frames, detects circle targets and feeds displacement data upstream."""
    # Smoothing window applied to displacement data before upload
    adaptive_smoother: AdaptiveSmoothingWindow
    capture: cv2.VideoCapture
    capturePath: str = ""  # current camera index / URL as passed to open_video
    is_opened: bool = False  # capture device successfully opened
    is_running = True  # main loop flag; cleared by stop()
    is_clear_zero: bool = False  # one-shot flag set by the "clearZero" command
    last_save_abnormal_img_time = time.time()  # rate limiter for save_abnormal_img
    last_save_lost_img_time = time.time()  # rate limiter for save_lost_img
    # Lock guarding configObj.config_info.targets (shared with on_data)
    targets_lock: threading.Lock = threading.Lock()
def __init__(self):
print("初始化 VideoProcessor")
pass
    def on_data(self, msg: Msg):
        """Handle one control command and return the JSON reply string.

        Unknown commands fall through the match and return None.
        """
        global configObj, is_video_mode, reporter
        logging.info(f"msg={msg}")
        match msg.cmd:
            case "getBase":
                # base info without the bulky / sensitive sections
                base_info = asdict(configObj.config_info)
                base_info.pop("targets", None)
                base_info.pop("server", None)
                base_info.pop("upload", None)
                resp_msg = models.msg.Msg(_from="dev", cmd="getBase", values={"base": base_info})
                resp_json = resp_msg.to_json_()
                return resp_json
            case "getPoints":
                # deep copy so transient handler state can be stripped from the reply
                targets = copy.deepcopy(configObj.config_info.targets)
                for k, v in targets.items():
                    targets[k].handler_info = None
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"targets": targets})
                resp_json = resp_msg.to_json_()
                return resp_json
            case "setPoints":
                v = msg.values
                ts = v["targets"]
                with self.targets_lock:
                    # wipe and rebuild the whole target table from the message
                    configObj.config_info.targets = {}
                    for _, t in ts.items():
                        t_str = json.dumps(t)
                        new_c_target = models.target.CircleTarget.from_json(t_str)
                        configObj.config_info.targets[new_c_target.info.id] = new_c_target
                configObj.save2json_file()
                resp_msg = models.msg.Msg(_from="dev", cmd="setPoints", values={"operate": True})
                # NOTE(review): to_json() here vs to_json_() in other cases — confirm intended
                resp_json = resp_msg.to_json()
                return resp_json
            case "getDataFps":
                fps = configObj.config_info.fps.data
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"dataFps": fps})
                resp_json = resp_msg.to_json()
                return resp_json
            case "setDataFps":
                v = msg.values
                fps = v["dataFps"]
                configObj.config_info.fps.data = fps
                configObj.save2json_file()
                # apply the new rate to the uploader immediately
                reporter.adjust_rate(fps, "data")
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json()
                return resp_json
            case "setCap":
                v = msg.values
                cap = v["cap"]
                self.switch_video(cap)
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json()
                return resp_json
            case "videoMode":
                v = msg.values
                is_debug = v["debug"]
                is_video_mode = is_debug
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json()
                return resp_json
            case "clearZero":
                # one-shot flag consumed by draw_rectangle on the next frame
                self.is_clear_zero = True
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json()
                return resp_json
            case "getId":
                mac_address = configObj.config_info.mac
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"id": mac_address})
                resp_json = resp_msg.to_json_()
                return resp_json
            case "setId":
                v = msg.values
                mac_id = v["id"]
                configObj.config_info.mac = mac_id
                configObj.save2json_file()
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json_()
                return resp_json
            case "getAlert":
                alert_info = configObj.config_info.alert
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values=alert_info)
                resp_json = resp_msg.to_json_()
                return resp_json
            case "setAlert":
                v = msg.values
                enable = v["enable"]
                interval_sec = v["intervalSec"]
                configObj.config_info.alert.enable = enable
                configObj.config_info.alert.intervalSec = interval_sec
                configObj.save2json_file()
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json_()
                return resp_json
            case "getWin":
                win_info = configObj.config_info.win
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values=win_info)
                resp_json = resp_msg.to_json_()
                return resp_json
            case "setWin":
                v = msg.values
                win = configSer.Win(**v)
                configObj.config_info.win = win
                configObj.save2json_file()
                # resize the smoothing window to match the new settings
                self.adaptive_smoother.resize(window_size=win.size, volatility_threshold=win.threshold, img_threshold=win.imgThreshold)
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": True})
                resp_json = resp_msg.to_json_()
                return resp_json
            case "getMqtt":
                win_info = configObj.config_info.upload
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values=win_info)
                resp_json = resp_msg.to_json_()
                return resp_json
            case "setMqtt":
                v = msg.values
                mq_upload = configSer.Upload(**v)
                configObj.config_info.upload = mq_upload
                configObj.save2json_file()
                # reconnect the uploader with the new MQTT settings
                ok = reporter.adjust_mqtt(mq_upload)
                resp_msg = models.msg.Msg(_from="dev", cmd=msg.cmd, values={"operate": ok})
                resp_json = resp_msg.to_json_()
                return resp_json
        print("==")
def pre_handler_img(self,gray_frame,now_str:str):
# 将灰度图压缩为 JPEG 格式,并存储到内存缓冲区
img_base64 = utils.frame_to_base64(gray_frame, format="JPEG")
all_img = models.sampleMsg.AllImg(image=img_base64, time=now_str)
self.enqueue_image(all_img)
    def draw_rectangle(self, img):
        """Detect every configured target in img, draw overlays and upload displacements."""
        global configObj, is_video_mode
        gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        now_time = datetime.now()
        now_str = now_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        # frame upload (only when a video fps is configured)
        if configObj.config_info.fps.video > 0:
            self.pre_handler_img(gray_frame, now_str)
        if len(configObj.config_info.targets) == 0: return
        # displacement of the base (reference) target, in pixels
        base_point_pix: models.target.Point = None
        # data points with fresh detections for this frame
        frame_upload_data = models.sampleMsg.AllSensorData(data=[], time=now_str)
        with self.targets_lock:
            # pass 1: per-target detection in pixel space
            for i, tr in configObj.config_info.targets.items():
                if not hasattr(tr, "info"):
                    print("====")
                _start_point = models.target.Point(tr.info.rectangle_area.x, tr.info.rectangle_area.y)
                _end_point = models.target.Point(
                    tr.info.rectangle_area.x + tr.info.rectangle_area.w,
                    tr.info.rectangle_area.y + tr.info.rectangle_area.h)
                # draw the target's bounding rectangle
                blue_color = (255, 0, 0)
                if tr.info.base:
                    blue_color = (200, 0, 200)  # magenta marks the base target
                cv2.rectangle(img, tuple(_start_point), tuple(_end_point), blue_color, 2)
                label = f"{tr.info.desc},r={tr.info.radius}"
                cv2.putText(img, label, (_start_point.x, _start_point.y - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.6, blue_color, 1)
                # detection: skip targets whose area falls outside the frame
                frame_height, frame_width = gray_frame.shape[:2]
                if _end_point.x > frame_width or _end_point.y > frame_height:
                    logging.warn(f"标靶[{tr.info.desc}]sub_image 超出区域")
                    continue
                sub_image = self.extract_sub_image(gray_frame, _start_point, _end_point)
                # binary thresholding currently disabled; the raw sub-image is used
                sub_binary_frame = sub_image
                # Gaussian blur when configured (kernel size must be odd and > 1)
                gauss_size = tr.info.threshold.gauss
                if gauss_size > 1:
                    sub_binary_frame = cv2.GaussianBlur(sub_binary_frame, (gauss_size, gauss_size), sigmaX=0, sigmaY=0, borderType=cv2.BORDER_REPLICATE)
                if tr.perspective is not None:
                    # perspective correction currently disabled
                    sub_width = sub_binary_frame.shape[1]
                    sub_height = sub_binary_frame.shape[0]
                    # sub_binary_frame = cv2.warpPerspective(sub_binary_frame, tr.perspective, (sub_width, sub_height))
                # overlay the processed sub-image onto the original in video mode
                if is_video_mode:
                    sub_c_img = cv2.cvtColor(sub_binary_frame, cv2.COLOR_GRAY2BGR)
                    self.cover_sub_image(img, sub_c_img, _start_point, _end_point)
                circles = self.circle_detect_Edges(sub_binary_frame, tr.info.threshold.gradient, tr.info.threshold.anchor)
                if __debug__:
                    cv2.imshow(f'{tr.info.id}_binaryImg', sub_binary_frame)
                if len(circles) == 0:
                    # keep a snapshot of frames where detection failed
                    self.save_lost_img(img)
                    continue
                elif len(circles) > 1:
                    logging.info(f"标靶[{tr.info.desc}],匹配圆{len(circles)}")
                    i = 0
                    for circle in circles:
                        logging.info(f"标靶[{tr.info.desc}],圆{i}半径={circle[2]}")
                        i += 1
                center, radius_pix = self.circle_match(circles, _start_point)
                # record circle-centre state
                if tr.handler_info is None:
                    tr.handler_info = models.target.HandlerInfo()
                if not tr.handler_info.is_init:
                    tr.handler_info.is_init = True
                    tr.handler_info.center_init = center
                    # persist the first-seen centre position
                    configObj.save2json_file()
                tr.handler_info.center_point = center
                tr.handler_info.radius_pix = radius_pix
                tr.circle_displacement_pix()
                # detection timestamp (used by the offline monitor)
                tr.handler_info.last_detected_time = now_time
                if tr.info.base:
                    base_point_pix = tr.handler_info.displacement_pix
            # pass 2: convert pixel displacement to physical units
            for i, tr in configObj.config_info.targets.items():
                if tr.handler_info is None:
                    continue
                if tr.handler_info.displacement_pix is None:
                    continue
                # subtract the base target's offset
                if base_point_pix is not None:
                    raw_point = tr.handler_info.displacement_pix
                    tr.handler_info.displacement_pix = models.target.Point(
                        x=raw_point.x - base_point_pix.x,
                        y=raw_point.y - base_point_pix.y)
                    if not tr.info.base:
                        pass
                tr.circle_displacement_phy()
                if tr.handler_info.displacement_phy is not None:
                    frame_upload_data.data.append(
                        models.sampleMsg.SensorData(
                            str(tr.info.id),
                            tr.info.desc,
                            tr.handler_info.displacement_phy.x,
                            tr.handler_info.displacement_phy.y)
                    )
                    # draw the circle plus the physical-displacement caption
                    self.circle_show_phy(img, tr)
                tr.handler_info.displacement_pix = None
                tr.handler_info.displacement_phy = None
            if self.is_clear_zero:
                logging.info(f"执行清零")
                for t_id, target in configObj.config_info.targets.items():
                    if target.handler_info is not None:
                        target.handler_info.center_init = target.handler_info.center_point
                self.is_clear_zero = False
                # persist the re-zeroed centre positions
                configObj.save2json_file()
        is_abnormal = self.sink_data(frame_upload_data)
        if is_abnormal:
            self.save_abnormal_img(img)
def save_abnormal_img(self, frame, imgs_dir="/home/forlinx/Pictures"):
"""
保存当前图像到本地文件系统imgs目录下,最多保留10张图片
"""
# 检查是否需要限制存储频率(每秒最多一张)
current_time = time.time()
offset_time=current_time - self.last_save_abnormal_img_time
if offset_time < 1:
return
else:
self.last_save_abnormal_img_time = current_time
# 检查并创建目录
if not os.path.exists(imgs_dir):
os.makedirs(imgs_dir, exist_ok=True)
# 获取当前时间戳用于命名图片
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"{timestamp}.jpg"
filepath = os.path.join(imgs_dir, filename)
# 查找已存在的异常图片文件
existing_files = glob.glob(os.path.join(imgs_dir, "*.jpg"))
# 如果已存在10张或更多图片,删除最早的一张
if len(existing_files) >= 10:
# 按文件名排序(按时间顺序)
existing_files.sort()
# 删除最早的文件
os.remove(existing_files[0])
# 保存当前图片
cv2.imwrite(filepath, frame)
def save_lost_img(self, frame,imgs_dir="/home/forlinx/Pictures/lost"):
# 检查是否需要限制存储频率(每秒最多一张)
current_time = time.time()
offset_time=current_time - self.last_save_lost_img_time
if offset_time < 10:
return
else:
self.last_save_lost_img_time = current_time
logging.info(f"无法识别的帧 => 存储")
# 检查并创建目录
if not os.path.exists(imgs_dir):
os.makedirs(imgs_dir, exist_ok=True)
# 获取当前时间戳用于命名图片
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"{timestamp}.jpg"
filepath = os.path.join(imgs_dir, filename)
# 查找已存在的异常图片文件
existing_files = glob.glob(os.path.join(imgs_dir, "*.jpg"))
# 如果已存在10张或更多图片,删除最早的一张
if len(existing_files) >= 10:
# 按文件名排序(按时间顺序)
existing_files.sort()
# 删除最早的文件
os.remove(existing_files[0])
# 保存当前图片
cv2.imwrite(filepath, frame)
    def sink_data(self, d: models.sampleMsg.AllSensorData):
        """Smooth, round and enqueue one frame's sensor data; return the abnormality flag."""
        is_abnormal = False
        # drop empty frames
        if len(d.data) == 0:
            return is_abnormal
        if configObj.config_info.fps.data > 0:
            new_d, is_abnormal = self.pre_handler_data(d)
            # round to 3 decimals before upload
            for i, snd in enumerate(new_d.data):
                snd.x = round(snd.x, 3)
                snd.y = round(snd.y, 3)
                new_d.data[i] = snd
            self.enqueue_data(new_d)
        return is_abnormal
def pre_handler_data(self, data: models.sampleMsg.AllSensorData) -> (models.sampleMsg.AllSensorData,bool):
if self.adaptive_smoother is None:
return data,False
smoothed_result,is_abnormal = self.adaptive_smoother.process(data)
return smoothed_result,is_abnormal
def circle_match(self,circles, rect_s_point:models.target.Point)-> (models.target.Point,float):
circle = max(circles, key=lambda c: c[2])
# 绘制圆心
center = (circle[0] + rect_s_point.x, circle[1] + rect_s_point.y)
radius = float(np.round(circle[2], 3))
cp = models.target.Point(x=round(center[0], 5), y=round(center[1], 5))
return cp,radius
def circle_show(self, img,
center: models.target.Point,radius:float,
rect_s_point: models.target.Point,
rect_e_point: models.target.Point):
font = cv2.FONT_HERSHEY_SIMPLEX
color = (255, 0, 0) # 蓝色
scale = 0.5
center_int = tuple(int(x) for x in center)
radius_int = int(radius)
cv2.circle(img, center_int, 2, (0, 255, 0), 4)
# 绘制外圆
cv2.circle(img, center_int, radius_int, (0, 0, 255), 2)
# 打印圆心坐标
text1 = f"c:{(center.x,center.y,radius)}"
txt_location = (rect_s_point.x+2,rect_e_point.y-2)
# txt_location = (center_int[0] - radius_int, center_int[1] + radius_int + 10)
cv2.putText(img, text1, txt_location, font, scale, color, 1)
    def circle_show_phy(self, img, tr: models.target.CircleTarget):
        """Draw tr's detected circle plus its physical displacement caption onto img.

        Assumes tr.handler_info.displacement_phy is set (caller checks this).
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        color = (255, 0, 0)  # blue (BGR)
        scale = 0.5
        center = tr.handler_info.center_point
        center_int = tuple(int(x) for x in center)
        radius_int = int(tr.handler_info.radius_pix)
        cv2.circle(img, center_int, 2, (0, 255, 0), 4)
        # outer circle
        cv2.circle(img, center_int, radius_int, (0, 0, 255), 2)
        # pixel-space centre caption, anchored at the rectangle's bottom-left
        text1 = f"c:{(center.x, center.y, tr.handler_info.radius_pix)}"
        rect_s_point = tr.info.rectangle_area.x
        rect_e_point = tr.info.rectangle_area.y + tr.info.rectangle_area.h - 5
        txt_location = (rect_s_point, rect_e_point)
        cv2.putText(img, text1, txt_location, font, scale, color, 1)
        # physical displacement caption just below
        txt_location2 = (rect_s_point, rect_e_point + 20)
        text2 = f"{round(tr.handler_info.displacement_phy.x,3),round(tr.handler_info.displacement_phy.y,3)}"
        cv2.putText(img, text2, txt_location2, font, scale, color, 1)
def circle_detect(self, img) -> list:
# 圆心距 canny阈值 最小半径 最大半径
circles_float = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT_ALT, 1, 30, param1=200, param2=0.8, minRadius=15,
maxRadius=0)
# 创建一个0行, 2列的空数组
if circles_float is not None:
# 提取圆心坐标(保留2位小数)
centers = [(round(float(x),2), round(float(y),2), round(float(r),2)) for x, y, r in circles_float[0, :]]
return centers
else:
return []
def circle_detect_Blob(self, img) -> list:
params = cv2.SimpleBlobDetector_Params()
detector = cv2.SimpleBlobDetector.create(params)
keypoints:Tuple = detector.detect(img)
centers=[]
# 创建一个0行, 2列的空数组
if keypoints is not None:
for kp in keypoints:
p=kp.pt
center = (round(float(p[0]),3), round(float(p[1]),3), round(float(kp.size/2),3))
centers.append( center)
return centers
    def circle_detect_Edges(self, image, gradient: int, anchor: int) -> list[Tuple[float, float, float]]:
        """Detect circles via OpenCV EdgeDrawing; returns [(cx, cy, r), ...] rounded to 3 decimals.

        :param gradient: minimum gradient magnitude for a pixel to count as edge
        :param anchor: minimum gradient magnitude for a pixel to be an anchor
        """
        # create the EdgeDrawing detector
        ed = cv2.ximgproc.createEdgeDrawing()
        # configure its parameter struct
        params = cv2.ximgproc_EdgeDrawing_Params()
        params.EdgeDetectionOperator = cv2.ximgproc.EdgeDrawing_SOBEL
        params.GradientThresholdValue = gradient  # pixels need gradient >= this to be edge candidates
        params.AnchorThresholdValue = anchor      # and >= this to be selected as anchors
        ed.setParams(params)
        # timing
        start = cv2.getTickCount()
        # detect edges, then ellipses/circles
        ed.detectEdges(image)
        ellipses = ed.detectEllipses()
        end_ellipses = cv2.getTickCount()
        duration_ellipses = (end_ellipses - start) * 1000 / cv2.getTickFrequency()
        # print(f"EdgeDrawing 椭圆检测耗时: {duration_ellipses:.2f} ms")
        centers = []
        if ellipses is None:
            return centers
        for ellipseRows in ellipses:
            ellipse = ellipseRows[0]  # ellipses is an N x 1 x 6 array
            # center = (int(ellipse[0]), int(ellipse[1]))
            # axes = (int(ellipse[2] + ellipse[3]), int(ellipse[2] + ellipse[4]))
            # angle = ellipse[5]  # ellipse rotation angle in degrees (0-180)
            center = (round(float(ellipse[0]), 3), round(float(ellipse[1]), 3), round(float(ellipse[2]), 3))
            # skip zero/negative radius
            if center[2] <= 0:
                continue
            centers.append(center)
        return centers
def extract_sub_image(self,frame, top_left, bottom_right):
"""
从帧中截取子区域
:param frame: 输入的视频帧
:param top_left: 子图片的左上角坐标 (x1, y1)
:param bottom_right: 子图片的右下角坐标 (x2, y2)
:return: 截取的子图片
"""
x1, y1 = top_left
x2, y2 = bottom_right
return frame[y1:y2, x1:x2]
def cover_sub_image(self,frame,sub_frame, top_left, bottom_right):
x1, y1 = top_left
x2, y2 = bottom_right
frame[y1:y2, x1:x2]= sub_frame
return frame
def open_video(self,video_id):
sleep(1)
logging.info(f"打开摄像头 -> {video_id}")
self.capture = cv2.VideoCapture(video_id)
frame_width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
logging.info(f"默认分辨率= {frame_width}*{frame_height}")
logging.info(f"{video_id}地址->{self.capture}")
if not self.capture.isOpened():
self.capture.release()
logging.warn(f"无法打开摄像头{video_id}")
return
if frame_width==640:
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 4224) # 宽度
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 3136) # 高度
logging.info(f"摄像头分辨率 更新->4224*3136")
self.is_opened=True
self.capturePath=video_id
fps = self.capture.get(cv2.CAP_PROP_FPS)
logging.info(f"fps={fps},video_id={video_id},")
def switch_video(self,video_id:str):
print(f"切换摄像头 -> {video_id}")
self.is_opened = False
self.capture.release()
cv2.destroyAllWindows()
if str.isdigit(video_id):
video_id=int(video_id)
self.open_video(video_id)
    def clear_zero(self):
        # NOTE(review): despite the name, this only marks the capture as closed;
        # the actual re-zeroing path sets self.is_clear_zero (see the "clearZero"
        # command in on_data) — confirm this method's intent.
        self.is_opened = False
def show_video(self):
camera_err_counts=0
camera_switch_counts=0
global sigExit,start_point, end_point, drawing
if __debug__:
cv2.namedWindow('Frame')
cv2.setMouseCallback('Frame', add_rectangle)
# 读取一帧图像
while self.is_running:
if camera_err_counts >= 5:
logging.warn(f"读取摄像头异常,准备重新切换")
self.switch_video(str(self.capturePath))
camera_err_counts = 0
camera_switch_counts += 1
continue
if camera_switch_counts >= 2:
logging.warn(f"切换摄像头异常{camera_switch_counts}次,准备重启")
self.stop()
logging.warn(f"执行退出...")
sleep(2)
os._exit(0)
if not self.is_opened:
logging.warn(f"摄像头 标记is_opened={self.is_opened} [{camera_switch_counts}|{camera_err_counts}]")
sleep(2)
camera_err_counts += 1
continue
# sleep(0.02)
ret, frame = self.capture.read()
if not ret:
camera_err_counts+=1
logging.warn(f"${camera_err_counts}次,无法读取帧,cap地址- >{self.capture}")
sleep(1)
continue
# logging.info(f"处理图像帧==>start")
self.frame_handle(frame)
# cv2.waitKey(1)
# 显示图像
if frame is not None:
if __debug__:
cv2.imshow('Frame', frame)
#缓存到推流
videoPush.update_latest_frame(frame)
logging.warn("退出VideoProcessor")
def show_image(self,frame):
global start_point, end_point, drawing
if __debug__:
cv2.namedWindow('Frame')
cv2.setMouseCallback('Frame', add_rectangle)
# 读取一帧图像
while True:
cp_img=frame.copy()
self.frame_handle(cp_img)
if cv2.waitKey(1) & 0xFF == ord('q'): # 按'q'退出循环
break
cv2.destroyAllWindows()
def frame_handle(self,frame):
# 绘图-历史点
self.draw_rectangle(frame)
# 绘图-实时
if drawing:
cv2.rectangle(frame, tuple(start_point), tuple(end_point), (0, 200, 200), 4)
# print(f"鼠标位置 {start_point} -> {end_point}")
def image_mode(self):
img_raw=cv2.imread('images/trans/_4point.jpg')#images/target/rp80max3.jpg
# img_raw = cv2.imread('images/trans/_4point.jpg') # images/target/rp80max3.jpg
# img_raw = cv2.imread('images/target/rp80.jpg') # images/target/rp80max3.jpg
self.show_image(img_raw)
# 支持
def video_mode(self,video_id:str):
if str.isdigit(video_id):
video_id=int(video_id)
offline_monitor()
self.open_video(video_id)
self.show_video()
# 释放摄像头资源并关闭所有窗口
logging.warn("退出 video")
self.capture.release()
cv2.destroyAllWindows()
def rtsp_mode(self,rtsp_url:str):
# rtsp_url ="rtsp://admin:123456abc@192.168.1.64:554"
# rtsp_url ="rtsp://admin:123456abc@192.168.1.64:554/h264/ch1/main/av_stream"
self.open_video(rtsp_url)
fps = self.capture.get(cv2.CAP_PROP_FPS)
logging.info(f"rtsp fps={fps}")
self.show_video()
# 释放摄像头资源并关闭所有窗口
self.capture.release()
cv2.destroyAllWindows()
def enqueue_data(self,data):
global reporter
# 获取当前时间戳
timestamp = time.time()
# 将时间戳转换为 datetime 对象
dt = datetime.fromtimestamp(timestamp).strftime("%Y%m%d%H%M%S%f")[:-3] # 毫秒部分是微秒的前三位
# 放入图片队列(自动丢弃最旧数据当队列满时)
try:
reporter.data_queue.put((dt, data), block=False)
except queue.Full:
# self.reporter.data_dropped += 1
pass
def enqueue_image(self,data):
global reporter
# 获取当前时间戳
timestamp = time.time()
# 将时间戳转换为 datetime 对象
dt = datetime.fromtimestamp(timestamp).strftime("%Y%m%d%H%M%S%f")[:-3] # 毫秒部分是微秒的前三位
# 放入图片队列(自动丢弃最旧数据当队列满时)
try:
reporter.image_queue.put((dt, data), block=False)
except queue.Full:
pass
#self.reporter.image_dropped += 1
    def stop(self):
        # Stop the main loop and release the capture device.
        self.is_running = False
        self.capture.release()
def enqueue_alert(data):
    """Enqueue (timestamp, alert) for upload; silently drop when the queue is full."""
    global reporter
    # millisecond-precision timestamp key
    stamp = datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S%f")[:-3]
    try:
        reporter.alert_queue.put((stamp, data), block=False)
    except queue.Full:
        # best effort: drop the alert rather than block
        pass
def task_timeout():
    """Periodic offline check: queue an alert for each target not detected recently."""
    # re-arm the timer first so the check keeps running even if alerting is off
    offline_monitor()
    if not configObj.config_info.alert.enable:
        return
    now_time: datetime = datetime.now()
    now_str = now_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
    all_upload_alert = models.sampleMsg.AllAlert(alert=[], time=now_str)
    for t_id, tr in configObj.config_info.targets.items():
        # only targets that have been detected at least once can go offline
        if tr.handler_info is not None and tr.handler_info.is_init:
            delta_s = round((now_time - tr.handler_info.last_detected_time).total_seconds())
            if delta_s > configObj.config_info.alert.intervalSec:
                # floor to an integer multiple of intervalSec
                adjusted_delta_s = (delta_s // configObj.config_info.alert.intervalSec) * configObj.config_info.alert.intervalSec
                ad = models.sampleMsg.AlertData(tr.info.desc, adjusted_delta_s)
                all_upload_alert.alert.append(ad)
    if len(all_upload_alert.alert) > 0:
        enqueue_alert(all_upload_alert)
        # fix: logging.warn is deprecated (removed in Python 3.13) — use warning
        logging.warning(f"标靶脱靶超时=> {all_upload_alert}")
def offline_monitor():
    # Arm a one-shot timer that fires task_timeout after intervalSec seconds;
    # task_timeout re-arms it, forming a periodic offline-detection loop.
    timer = threading.Timer(configObj.config_info.alert.intervalSec, task_timeout)
    timer.start()  # start the timer