基于AidLux实现热成像电力巡检
热成像巡检介绍
输电线路巡检主要分为线路本体、附属设施、通道及电力保护区三大类。其中线路本体巡检内容包括绝缘子、金具以及导、地线等。特别地,绝缘子,顾名思义,是隔离导线与塔体,起绝缘作用。
输电线路运行过程中,由于内在或外在的原因,导致绝缘子串的温度异常,甚至导致低零值、零值等问题。
提示:什么是零值和低零值? 绝缘子的电阻值应该达到相当的数量级才能保证绝缘作用。当绝缘子的电阻变得很小,就会出现低零值现象;当绝缘子电阻趋近于0时,就出现了零值问题。绝缘子出现低零值和零值,就基本丧失了绝缘作用。 解决方法:可以通过红外相机进行拍摄,对图片进行温度分析,判断绝缘子是否处于正常工作状态。劣化的陶瓷绝缘子表现为低温、复合绝缘子表现为高温。 再考虑到绝缘子串电压呈U型分布,以及导线端电压高于塔头端等呈现的温度分布特征,通过检测、分析绝缘子串的温度信息,可以判断绝缘子是否出现工作异常。
热成像巡检 本文使用目标检测网络对旋转后的部件进行定位,具体的网络训练由于数据集过大没有做,所以采用的是老师提供的onnx模型。有需要可以根据r-retinanet网络自己训练一个。
接下来要将训练好的模型部署在AidLux平台,所以需要将模型从onnx转换为tflite;导出onnx模型以后,我们还需要对模型本身进行一定的精简。
onnx导为tflite模型的具体代码如下:
# Example: export the R-RetinaNet ONNX model to TFLite using the converter
# defined below.  `onnx_path` is the path of the exported .onnx file.
onnx_converter(
    onnx_model_path = onnx_path,                # source ONNX model file
    need_simplify = False,                      # skip the onnx-simplifier pass
    output_path = os.path.dirname(onnx_path),   # save next to the ONNX file
    target_formats = ['tflite'],                # or ['keras'], ['keras', 'tflite']
    weight_quant = False,                       # no weight-only quantization
    int8_model = False,                         # no full-int8 quantization
    int8_mean = None,                           # calibration mean (int8 only)
    int8_std = None,                            # calibration std (int8 only)
    image_root = None                           # calibration images (int8 only)
)
def onnx_converter(onnx_model_path: str, output_path: str = None,
                   input_node_names: list = None, output_node_names: list = None,
                   need_simplify: bool = True, target_formats: list = ['keras', 'tflite'],
                   native_groupconv: bool = False,
                   weight_quant: bool = False, int8_model: bool = False, image_root: str = None,
                   int8_mean: list = None, int8_std: list = None) -> dict:
    """Convert an ONNX model into Keras (.h5) and/or TFLite (.tflite) files.

    Args:
        onnx_model_path: Path to the source .onnx file.
        output_path: Output directory; defaults to the directory of the ONNX file.
        input_node_names / output_node_names: Optional graph sub-selection,
            forwarded to ``load_onnx_modelproto``.
        need_simplify: Run the ONNX simplifier before conversion.
        target_formats: List containing 'keras' and/or 'tflite'.
            (Default list is never mutated, so the shared default is safe.)
        native_groupconv: Keep grouped convolutions native (no expansion).
        weight_quant / int8_model / image_root: TFLite quantization options.
        int8_mean / int8_std: Per-channel normalization used for int8
            calibration; default to ImageNet values when None.

    Returns:
        dict with keys 'keras', 'tflite' (saved paths or None) and
        'keras_error', 'tflite_error' (max element-wise error vs. ONNX).
    """
    # Avoid mutable default arguments; fall back to ImageNet statistics.
    if int8_mean is None:
        int8_mean = [123.675, 116.28, 103.53]
    if int8_std is None:
        int8_std = [58.395, 57.12, 57.375]

    # BUG FIX: the original guard chained the checks with `and`, so an
    # invalid list such as ['onnx'] never raised.  Require a list containing
    # at least one supported format.
    if not isinstance(target_formats, list) or not (set(target_formats) & {'keras', 'tflite'}):
        raise KeyError("'keras' or 'tflite' should in list")

    model_proto = load_onnx_modelproto(onnx_model_path, input_node_names, output_node_names, need_simplify)

    keras_model = keras_builder(model_proto, native_groupconv)

    tflite_model = None
    if 'tflite' in target_formats:
        tflite_model = tflite_builder(keras_model, weight_quant, int8_model, image_root, int8_mean, int8_std)

    # Derive "<output_path>/<model name without extension>" as the stem
    # for every saved artifact.
    onnx_path, model_name = os.path.split(onnx_model_path)
    if output_path is None:
        output_path = onnx_path
    output_path = os.path.join(output_path, model_name.split('.')[0])

    keras_model_path = None
    if 'keras' in target_formats:
        keras_model_path = output_path + ".h5"
        keras_model.save(keras_model_path)
        LOG.info(f"keras model saved in {keras_model_path}")

    tflite_model_path = None
    if 'tflite' in target_formats:
        tflite_model_path = output_path + ".tflite"
        with open(tflite_model_path, "wb") as fp:
            fp.write(tflite_model)

    convert_result = {"keras": keras_model_path, "tflite": tflite_model_path, "keras_error": 0, "tflite_error": 0}
    # Quantized (int8) outputs cannot be compared element-wise against the
    # float ONNX model, so skip the numerical validation entirely.
    if int8_model:
        return convert_result

    error_dict = {}
    try:
        error_dict = get_elements_error(model_proto, keras_model_path, tflite_model_path)
        keras_error, tflite_error = error_dict.get("keras", None), error_dict.get("tflite", None)
        if keras_error:
            if keras_error > 1e-2:
                LOG.error("h5 model elements' max error has reached {:^.4E}, but convert is done, please check {} carefully!".format(keras_error, keras_model_path))
            elif keras_error > 1e-4:
                LOG.warning("h5 model elements' max error is {:^.4E}, pass, h5 saved in {}".format(keras_error, keras_model_path))
            else:
                LOG.info("h5 model elements' max error is {:^.4E}, pass, h5 saved in {}".format(keras_error, keras_model_path))
        if tflite_error:
            if tflite_error > 1e-2:
                LOG.error("tflite model elements' max error has reached {:^.4E}, but convert is done, please check {} carefully!".format(tflite_error, tflite_model_path))
            elif tflite_error > 1e-4:
                LOG.warning("tflite model elements' max error is {:^.4E}, pass, tflite saved in {}".format(tflite_error, tflite_model_path))
            else:
                LOG.info("tflite model elements' max error is {:^.4E}, pass, tflite saved in {}".format(tflite_error, tflite_model_path))
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
    # and SystemExit); validation failure should not abort a finished convert.
    except Exception:
        LOG.warning("convert is successed, but model running is failed, please check carefully!")

    # When validation raised, error_dict stays empty and these become None
    # (preserving the original behavior).
    convert_result["keras_error"] = error_dict.get("keras", None)
    convert_result["tflite_error"] = error_dict.get("tflite", None)
    return convert_result
AidLux平台部署
对于开发者而言,AI项目中各种算法的数据集准备+模型训练+模型部署依然存在着不小的难度。AidLux的出现,可以将我们的安卓设备以非虚拟的形式变成同时拥有Android和Linux系统环境的边缘计算设备,支持主流AI框架,非常易于部署,还有专门的接口调度算力资源,极大地降低了AI应用落地门槛。
aidlux实现摄像头提取目标区域的核心代码如下:
def process_img(img, target_size=640, max_size=2000, multiple=32, keep_ratio=True, NCHW=True, ToTensor=True):
    """Resize and normalize one BGR frame for R-RetinaNet inference.

    Args:
        img: input image (OpenCV BGR ndarray — assumed; TODO confirm caller).
        target_size: shorter-side target for the keep-ratio resize.
        max_size: cap on the longer side after scaling.
        multiple: stride multiple the output dims are snapped to (FPN stride).
        keep_ratio: must be True; the False branch returns None (see below).
        NCHW: transpose HWC -> CHW and add a batch axis.
        ToTensor: wrap the array in a torch tensor.

    Returns:
        (im, im_scales) where im_scales = [sx, sy, sx, sy] for box rescaling,
        or None when keep_ratio is False.
    """
    # Hard-coded pre-resize to 640x512 (w, h) regardless of target_size.
    img = cv2.resize(img, (640, 512), interpolation=cv2.INTER_LINEAR)
    im_shape = img.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    # resize with keep_ratio
    if keep_ratio:
        im_scale = float(target_size) / float(im_size_min)
        if np.round(im_scale * im_size_max) > max_size:
            # Longer side would exceed max_size: rescale from the long side.
            im_scale = float(max_size) / float(im_size_max)
        # Snap each output dimension down to a multiple of `multiple`,
        # then recompute the exact per-axis scale actually applied.
        im_scale_x = np.floor(img.shape[1] * im_scale / multiple) * multiple / img.shape[1]
        im_scale_y = np.floor(img.shape[0] * im_scale / multiple) * multiple / img.shape[0]
        image_resized = cv2.resize(img, None, None, fx=im_scale_x, fy=im_scale_y, interpolation=cv2.INTER_LINEAR)
        im_scales = np.array([im_scale_x, im_scale_y, im_scale_x, im_scale_y])
        im = image_resized / 255.0  # np.float64
        im = im.astype(np.float32)
        PIXEL_MEANS = (0.485, 0.456, 0.406)  # RGB format mean and variances
        PIXEL_STDS = (0.229, 0.224, 0.225)
        # NOTE(review): the RGB-ordered means/stds are subtracted while the
        # image is still in BGR order (cvtColor runs after this) — confirm
        # this matches the normalization used at training time.
        im -= np.array(PIXEL_MEANS)
        im /= np.array(PIXEL_STDS)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # BGR2RGB
        if NCHW:
            im = np.transpose(im, (2, 0, 1)).astype(np.float32)  # [SAI-KEY] TensorFlow use input with NHWC.
        im = im[np.newaxis, ...]
        if ToTensor:
            im = torch.from_numpy(im)
        return im, im_scales
    else:
        # NOTE(review): keep_ratio=False is unimplemented and yields None;
        # callers must always pass keep_ratio=True.
        return None
if __name__ == "__main__":
    # --- Define input/output buffer sizes (bytes) ---
    # 1 x 640(h) x 800(w) x 3 channels x 4 bytes (float32).
    in_shape = [1 * 640 * 800 * 3 * 4]
    # 1 x 53325 anchors x 8 values (5 box params + 3 class scores) x 4 bytes.
    out_shape = [1 * 53325 * 8 * 4]
    # out_shape = [1 * 55425 * 8 * 4]  # alternative anchor count for other input sizes

    # --- Initialize AidLite runtime ---
    aidlite = aidlite_gpu.aidlite()

    # --- Load the R-RetinaNet TFLite model ---
    tflite_model = '/home/R-RetinaNet/models/r-retinanet.tflite'
    res = aidlite.ANNModel(tflite_model, in_shape, out_shape, 4, -1)  # Infer on -1: cpu, 0: gpu, 1: mixed, 2: dsp
    print(res)

    cap = cvs.VideoCapture(0)
    frame_id = 0
    while True:
        frame = cap.read()
        if frame is None:
            continue
        frame_id += 1
        # Process only one frame out of every 30 to keep up with the camera.
        if frame_id % 30 != 0:
            continue
        time0 = time.time()
        im, im_scale = process_img(frame, NCHW=False, ToTensor=False)

        # --- Set model input ---
        aidlite.setInput_Float32(im, 800, 640)
        # --- Run inference ---
        aidlite.invoke()
        # --- Fetch raw output ---
        preds = aidlite.getOutput_Float32(0)
        # Reshape the flat buffer to (1, 8, num_anchors).  Integer floor
        # division replaces the fragile C-style `(int)(x/8)` float cast.
        preds = preds.reshape(1, 8, preds.shape[0] // 8)
        output = np.transpose(preds, (0, 2, 1))

        # --- Build anchors for the current input resolution ---
        im_anchor = np.transpose(im, (0, 3, 1, 2)).astype(np.float32)
        anchors_list = []
        anchor_generator = Anchors(ratios=np.array([0.2, 0.5, 1, 2, 5]))
        original_anchors = anchor_generator(im_anchor)  # (bs, num_all_achors, 5)
        anchors_list.append(original_anchors)

        # --- Decode network output into rotated boxes ---
        decode_output = decoder(im_anchor, anchors_list[-1], output[..., 5:8], output[..., 0:5], thresh=0.5, nms_thresh=0.2, test_conf=None)
        for i in range(len(decode_output)):
            print("dim({}), shape: {}".format(i, decode_output[i].shape))

        # --- Rebuild detections: [class, score, box...] per row ---
        scores = decode_output[0].reshape(-1, 1)
        classes = decode_output[1].reshape(-1, 1)
        boxes = decode_output[2]
        # Map boxes from network input space back to the original frame.
        boxes[:, :4] = boxes[:, :4] / im_scale
        if boxes.shape[1] > 5:
            boxes[:, 5:9] = boxes[:, 5:9] / im_scale
        dets = np.concatenate([classes, scores, boxes], axis=1)

        # --- Keep only non-background classes ---
        keep = np.where(classes > 0)[0]
        dets = dets[keep, :]

        # --- Convert coordinates ('xyxya' -> 'xyxyxyxy' quadrilateral) ---
        res = sort_corners(rbox_2_quad(dets[:, 2:]))

        # --- Draw each detected quadrilateral on the frame ---
        for k in range(dets.shape[0]):
            cv2.line(frame, (int(res[k, 0]), int(res[k, 1])), (int(res[k, 2]), int(res[k, 3])), (0, 255, 0), 3)
            cv2.line(frame, (int(res[k, 2]), int(res[k, 3])), (int(res[k, 4]), int(res[k, 5])), (0, 255, 0), 3)
            cv2.line(frame, (int(res[k, 4]), int(res[k, 5])), (int(res[k, 6]), int(res[k, 7])), (0, 255, 0), 3)
            cv2.line(frame, (int(res[k, 6]), int(res[k, 7])), (int(res[k, 0]), int(res[k, 1])), (0, 255, 0), 3)
        # cv2.imwrite("/home/R-RetinaNet/samples/00_detected_image.jpg", img)
        cvs.imshow(frame)
实时检测视频如下 [www.bilibili.com/video/BV1oh…]
转载自:https://juejin.cn/post/7228888718655946812