#include "BasePresenter.h" #include "VrLog.h" #include "VrError.h" #include #include #include #include "PathManager.h" BasePresenter::BasePresenter(QObject *parent) : QObject(parent) , m_currentCameraIndex(0) , m_bCameraConnected(false) , m_bAlgoDetectThreadRunning(false) , m_pCameraReconnectTimer(nullptr) { // 创建相机重连定时器 m_pCameraReconnectTimer = new QTimer(this); m_pCameraReconnectTimer->setInterval(2000); // 默认2秒 connect(m_pCameraReconnectTimer, &QTimer::timeout, this, &BasePresenter::OnCameraReconnectTimer); } BasePresenter::~BasePresenter() { // 停止检测线程 StopAlgoDetectThread(); // 停止重连定时器 StopCameraReconnectTimer(); // 清理相机设备 for (auto& camera : m_vrEyeDeviceList) { if (camera.second) { camera.second->CloseDevice(); delete camera.second; camera.second = nullptr; } } m_vrEyeDeviceList.clear(); } int BasePresenter::Init() { LOG_INFO("BasePresenter::Init()\n"); int nRet = SUCCESS; nRet = InitApp(); ERR_CODE_RETURN(nRet); // 初始化算法参数 nRet = InitAlgoParams(); LOG_INFO("Algorithm parameters initialization result : %d\n", nRet); return nRet; } int BasePresenter::StartDetection(int cameraIndex, bool isAuto) { LOG_INFO("[BasePresenter] StartDetection - cameraIndex=%d, isAuto=%d\n", cameraIndex, isAuto); // 设置当前相机索引 if (cameraIndex >= 0 && cameraIndex != -1) { m_currentCameraIndex = cameraIndex; } int currentCamera = m_currentCameraIndex; // 检查相机列表是否为空 if (m_vrEyeDeviceList.empty()) { LOG_ERROR("[BasePresenter] No camera device found\n"); return ERR_CODE(DEV_NOT_FIND); } // 清空检测数据缓存 ClearDetectionDataCache(); int nRet = SUCCESS; // 启动指定相机(cameraIndex为相机ID,从1开始编号) int arrayIndex = currentCamera - 1; // 转换为数组索引(从0开始) // 检查相机是否连接 if (arrayIndex < 0 || arrayIndex >= static_cast(m_vrEyeDeviceList.size()) || m_vrEyeDeviceList[arrayIndex].second == nullptr) { LOG_ERROR("[BasePresenter] Camera %d is not connected or invalid\n", currentCamera); return ERR_CODE(DEV_NOT_FIND); } IVrEyeDevice* pDevice = m_vrEyeDeviceList[arrayIndex].second; // 获取数据类型(由子类决定) EVzResultDataType eDataType = GetDetectionDataType(); // 设置状态回调 VzNL_OnNotifyStatusCBEx statusCallback = GetCameraStatusCallback(); pDevice->SetStatusCallback(statusCallback, this); // 获取检测回调函数(由子类提供) VzNL_AutoOutputLaserLineExCB detectCallback = GetDetectionCallback(); // 开始检测 nRet = pDevice->StartDetect(detectCallback, eDataType, this); LOG_INFO("[BasePresenter] Camera %d start detection result: %d\n", currentCamera, nRet); if (nRet == SUCCESS) { // 启动算法检测线程 StartAlgoDetectThread(); } return nRet; } int BasePresenter::StopDetection() { LOG_INFO("[BasePresenter] StopDetection\n"); // 停止所有相机的检测 for (size_t i = 0; i < m_vrEyeDeviceList.size(); ++i) { IVrEyeDevice* pDevice = m_vrEyeDeviceList[i].second; if (pDevice) { int ret = pDevice->StopDetect(); if (ret == 0) { LOG_INFO("[BasePresenter] Camera %zu stop detection successfully\n", i + 1); } else { LOG_WARNING("[BasePresenter] Camera %zu stop detection failed, error code: %d\n", i + 1, ret); } } } // 停止算法检测线程 StopAlgoDetectThread(); return SUCCESS; } int BasePresenter::GetDetectionDataCacheSize() const { std::lock_guard lock(const_cast(m_detectionDataMutex)); return static_cast(m_detectionDataCache.size()); } int BasePresenter::SaveDetectionDataToFile(const std::string& filePath) { LOG_INFO("[BasePresenter] 保存检测数据到文件: %s\n", filePath.c_str()); std::lock_guard lock(m_detectionDataMutex); if(m_detectionDataCache.empty()){ LOG_WARNING("[BasePresenter] 检测数据缓存为空,无数据可保存\n"); return ERR_CODE(DATA_ERR_INVALID); } int lineNum = static_cast(m_detectionDataCache.size()); float scanSpeed = 0.0f; int maxTimeStamp = 0; 
int BasePresenter::GetDetectionDataCacheSize() const
{
    std::lock_guard<std::mutex> lock(const_cast<std::mutex&>(m_detectionDataMutex));
    return static_cast<int>(m_detectionDataCache.size());
}

int BasePresenter::SaveDetectionDataToFile(const std::string& filePath)
{
    LOG_INFO("[BasePresenter] Saving detection data to file: %s\n", filePath.c_str());

    std::lock_guard<std::mutex> lock(m_detectionDataMutex);

    if (m_detectionDataCache.empty())
    {
        LOG_WARNING("[BasePresenter] Detection data cache is empty, nothing to save\n");
        return ERR_CODE(DATA_ERR_INVALID);
    }

    int lineNum = static_cast<int>(m_detectionDataCache.size());
    float scanSpeed = 0.0f;
    int maxTimeStamp = 0;
    int clockPerSecond = 0;

    int result = m_dataLoader.SaveLaserScanData(filePath, m_detectionDataCache,
                                                lineNum, scanSpeed, maxTimeStamp, clockPerSecond);
    if (result == SUCCESS)
    {
        LOG_INFO("[BasePresenter] Saved %d lines of detection data to file: %s\n", lineNum, filePath.c_str());
    }
    else
    {
        LOG_ERROR("[BasePresenter] Failed to save detection data, error: %s\n", m_dataLoader.GetLastError().c_str());
    }

    return result;
}

int BasePresenter::LoadDebugDataAndDetect(const std::string& filePath)
{
    LOG_INFO("[BasePresenter] Loading debug data from file: %s\n", filePath.c_str());

    std::string fileName = QFileInfo(QString::fromStdString(filePath)).fileName().toStdString();
    OnStatusUpdate(QString("Loading file: %1").arg(fileName.c_str()).toStdString());
    SetWorkStatus(WorkStatus::Working);

    int lineNum = 0;
    float scanSpeed = 0.0f;
    int maxTimeStamp = 0;
    int clockPerSecond = 0;
    int result = SUCCESS;

    // 1. Clear the existing detection data cache
    ClearDetectionDataCache();

    // 2. Load the data into the cache
    {
        std::lock_guard<std::mutex> lock(m_detectionDataMutex);
        result = m_dataLoader.LoadLaserScanData(filePath, m_detectionDataCache,
                                                lineNum, scanSpeed, maxTimeStamp, clockPerSecond);
    }

    if (result != SUCCESS)
    {
        LOG_ERROR("[BasePresenter] Failed to load debug data: %s\n", m_dataLoader.GetLastError().c_str());
        OnStatusUpdate("Failed to load debug data");
        return result;
    }

    OnStatusUpdate(QString("Loaded %1 lines of debug data").arg(lineNum).toStdString());
    LOG_INFO("[BasePresenter] Loaded %d lines of debug data\n", lineNum);

    // 3. Run the detection task
    result = DetectTask();

    return result;
}
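// ----------------------------------------------------------------------------
// Usage sketch (illustrative only): the save/load pair above lets a scan be
// captured once and replayed offline. "presenter" and the dump path are
// hypothetical names used only for this example.
//
//   const std::string dumpPath = "./debug_scan.dat";   // hypothetical path
//   if (presenter->GetDetectionDataCacheSize() > 0)
//   {
//       presenter->SaveDetectionDataToFile(dumpPath);  // persist the cached scan lines
//   }
//   // ... later, without a camera attached ...
//   presenter->LoadDebugDataAndDetect(dumpPath);       // reload the lines and rerun DetectTask()
// ----------------------------------------------------------------------------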
void BasePresenter::SetCameraStatusCallback(VzNL_OnNotifyStatusCBEx fNotify, void* param)
{
    for (size_t i = 0; i < m_vrEyeDeviceList.size(); i++)
    {
        IVrEyeDevice* pDevice = m_vrEyeDeviceList[i].second;
        if (pDevice)
        {
            pDevice->SetStatusCallback(fNotify, param);
            LOG_DEBUG("[BasePresenter] Status callback set for camera %zu\n", i + 1);
        }
    }
}

void BasePresenter::SetWorkStatus(WorkStatus status)
{
    if (m_currentWorkStatus != status)
    {
        m_currentWorkStatus = status;
        LOG_INFO("[BasePresenter] Work status changed to: %s\n", WorkStatusToString(status).c_str());

        // Notify the subclass via the virtual hook; the subclass may forward this to the UI
        OnWorkStatusChanged(status);
    }
}

// ============ InitCamera implementation ============
int BasePresenter::InitCamera(std::vector<DeviceInfo>& cameraList, bool bRGB, bool bSwing)
{
    LOG_INFO("[BasePresenter] InitCamera\n");

    m_bRGB = bRGB;
    m_bSwing = bSwing;

    // Remember the configured cameras for later reconnect attempts
    m_expectedList = cameraList;

    // Notify the UI of the camera count
    int cameraCount = static_cast<int>(cameraList.size());
    OnCameraCountChanged(cameraCount);

    // Pre-allocate the camera list
    m_vrEyeDeviceList.resize(cameraCount, std::make_pair("", nullptr));
    for (int i = 0; i < cameraCount; i++)
    {
        m_vrEyeDeviceList[i] = std::make_pair(cameraList[i].name, nullptr);
    }
    LOG_INFO("[BasePresenter] camera count : %d\n", cameraCount);

    // Try to open every configured camera
    bool allCamerasConnected = true;
    if (cameraCount > 0)
    {
        for (int i = 0; i < cameraCount; i++)
        {
            int cameraIndex = i + 1;  // camera IDs are 1-based
            int nRet = OpenDevice(cameraIndex, cameraList[i].name.c_str(), cameraList[i].ip.c_str(), bRGB, bSwing);
            bool isConnected = (nRet == SUCCESS);

            // Notify the camera status change
            OnCameraStatusChanged(cameraIndex, isConnected);

            if (!isConnected)
            {
                allCamerasConnected = false;
                LOG_WARNING("[BasePresenter] Camera %d (%s) failed to connect\n", cameraIndex, cameraList[i].name.c_str());
            }
            else
            {
                LOG_INFO("[BasePresenter] Camera %d (%s) connected\n", cameraIndex, cameraList[i].name.c_str());
            }
        }
    }
    else
    {
        // No camera configured: create a single default entry
        m_vrEyeDeviceList.resize(1, std::make_pair("", nullptr));

        DeviceInfo devInfo;
        devInfo.index = 1;
        devInfo.ip = "";
        devInfo.name = "Camera";
        m_expectedList.push_back(devInfo);

        int nRet = OpenDevice(1, "Camera", nullptr, bRGB, bSwing);
        if (nRet != SUCCESS)
        {
            allCamerasConnected = false;
        }

        // Notify the camera status change
        OnCameraStatusChanged(1, SUCCESS == nRet);
    }

    // Count the connected cameras
    int connectedCount = 0;
    for (const auto& device : m_vrEyeDeviceList)
    {
        if (device.second != nullptr)
        {
            connectedCount++;
        }
    }
    m_bCameraConnected = (connectedCount > 0);  // at least one camera connected

    // Default camera index: the first connected camera
    m_currentCameraIndex = 1;  // IDs start at 1
    for (int i = 0; i < static_cast<int>(m_vrEyeDeviceList.size()); i++)
    {
        if (m_vrEyeDeviceList[i].second != nullptr)
        {
            m_currentCameraIndex = i + 1;  // first connected camera
            break;
        }
    }

    LOG_INFO("[BasePresenter] Camera init finished: %d/%zu cameras connected, default camera index: %d\n",
             connectedCount, m_expectedList.size(), m_currentCameraIndex);

    // If not every expected camera connected, start the reconnect timer
    if (!allCamerasConnected && !m_expectedList.empty())
    {
        LOG_INFO("[BasePresenter] Some cameras are not connected (%d/%zu), starting reconnect timer\n",
                 connectedCount, m_expectedList.size());
        StartCameraReconnectTimer();
    }
    else if (allCamerasConnected)
    {
        LOG_INFO("[BasePresenter] All cameras connected\n");
        // Make sure the timer is stopped
        StopCameraReconnectTimer();
    }
    else
    {
        LOG_WARNING("[BasePresenter] No camera configured (expectedCount=%zu)\n", m_expectedList.size());
    }

    return SUCCESS;
}

// ============ OpenDevice implementation ============
int BasePresenter::OpenDevice(int cameraIndex, const char* cameraName, const char* cameraIp, bool bRGB, bool bSwing)
{
    LOG_INFO("[BasePresenter] OpenDevice - index %d (%s, %s)\n",
             cameraIndex, cameraName, cameraIp ? cameraIp : "NULL");

    // 1. Create the camera device object
    IVrEyeDevice* pDevice = nullptr;
    IVrEyeDevice::CreateObject(&pDevice);
    if (!pDevice)
    {
        LOG_ERROR("[BasePresenter] Failed to create IVrEyeDevice object\n");
        return ERR_CODE(DEV_OPEN_ERR);
    }

    // 2. Initialize the device
    int nRet = pDevice->InitDevice();
    if (nRet != SUCCESS)
    {
        delete pDevice;
        LOG_ERROR("[BasePresenter] InitDevice failed, error code: %d\n", nRet);
    }
    ERR_CODE_RETURN(nRet);

    // 3. Open the camera device
    nRet = pDevice->OpenDevice(cameraIp, bRGB, bSwing);
    LOG_INFO("[BasePresenter] OpenDevice camera %d (%s/%s) result: %d \n",
             cameraIndex, bRGB ? "RGB" : "Normal", bSwing ? "Swing" : "Normal", nRet);

    // 4. Handle the open result
    bool cameraConnected = (SUCCESS == nRet);
    if (!cameraConnected)
    {
        delete pDevice;  // release the failed device
        pDevice = nullptr;
    }
    else
    {
        // Set the status callback (provided by the subclass)
        VzNL_OnNotifyStatusCBEx callback = GetCameraStatusCallback();
        nRet = pDevice->SetStatusCallback(callback, this);
        LOG_DEBUG("[BasePresenter] SetStatusCallback result: %d\n", nRet);
        if (nRet != SUCCESS)
        {
            delete pDevice;
            pDevice = nullptr;
        }
    }
    LOG_DEBUG("[BasePresenter] Camera %d (%s) connected %s\n",
              cameraIndex, cameraName, cameraConnected ? "success" : "failed");

    // 5. Store the device in the list
    int arrIdx = cameraIndex - 1;
    if (m_vrEyeDeviceList.size() > static_cast<size_t>(arrIdx))
    {
        m_vrEyeDeviceList[arrIdx] = std::make_pair(cameraName, pDevice);
    }
    else
    {
        LOG_WARNING("[BasePresenter] Camera index %d out of range, list size: %zu\n",
                    cameraIndex, m_vrEyeDeviceList.size());
    }

    return nRet;
}

// ============ AlgoDetectThreadFunc implementation ============
void BasePresenter::AlgoDetectThreadFunc()
{
    LOG_INFO("[BasePresenter] Algorithm detection thread started\n");

    while (m_bAlgoDetectThreadRunning)
    {
        std::unique_lock<std::mutex> lock(m_algoDetectMutex);

        // Wait until detection is triggered (a subclass or the status callback
        // calls m_algoDetectCondition.notify_one() to wake this thread)
        m_algoDetectCondition.wait(lock);

        if (!m_bAlgoDetectThreadRunning)
        {
            break;
        }

        LOG_INFO("[BasePresenter] Detection thread woke up, running detection task\n");

        // Run the detection task
        int nRet = DetectTask();
        if (nRet != SUCCESS)
        {
            LOG_ERROR("[BasePresenter] Detection task failed, error code: %d\n", nRet);
        }
        else
        {
            LOG_INFO("[BasePresenter] Detection task succeeded\n");
        }
    }

    LOG_INFO("[BasePresenter] Algorithm detection thread exited\n");
}

// ============ DetectTask implementation ============
int BasePresenter::DetectTask()
{
    LOG_INFO("[BasePresenter] DetectTask - starting detection task\n");

    // 1. Validate the detection data cache
    {
        std::lock_guard<std::mutex> lock(m_detectionDataMutex);
        if (m_detectionDataCache.empty())
        {
            LOG_WARNING("[BasePresenter] Detection data cache is empty\n");
            return ERR_CODE(DEV_DATA_INVALID);
        }
        LOG_INFO("[BasePresenter] Detection data cache size: %zu\n", m_detectionDataCache.size());
    }

    // 2. Hand the cached data to the subclass algorithm
    LOG_INFO("[BasePresenter] Calling ProcessAlgoDetection\n");
    int nRet = ProcessAlgoDetection(m_detectionDataCache);
    if (nRet != SUCCESS)
    {
        LOG_ERROR("[BasePresenter] ProcessAlgoDetection failed, error code: %d\n", nRet);
        return nRet;
    }

    LOG_INFO("[BasePresenter] DetectTask - detection task succeeded\n");
    return SUCCESS;
}
void BasePresenter::StartAlgoDetectThread()
{
    if (m_bAlgoDetectThreadRunning)
    {
        LOG_WARNING("[BasePresenter] Algorithm detection thread is already running\n");
        return;
    }

    m_bAlgoDetectThreadRunning = true;

    // Start the detection thread
    m_algoDetectThread = std::thread(&BasePresenter::AlgoDetectThreadFunc, this);
    m_algoDetectThread.detach();  // detached: the thread runs independently

    LOG_INFO("[BasePresenter] Algorithm detection thread launched\n");
}

void BasePresenter::StopAlgoDetectThread()
{
    if (!m_bAlgoDetectThreadRunning)
    {
        return;
    }

    LOG_INFO("[BasePresenter] Stopping algorithm detection thread...\n");

    m_bAlgoDetectThreadRunning = false;

    // Wake the thread if it is waiting on the condition variable
    m_algoDetectCondition.notify_all();

    // Note: the thread was detached, so no join() is required
    LOG_INFO("[BasePresenter] Algorithm detection thread stopped\n");
}

void BasePresenter::ClearDetectionDataCache()
{
    std::lock_guard<std::mutex> lock(m_detectionDataMutex);
    m_detectionDataCache.clear();
    LOG_DEBUG("[BasePresenter] Detection data cache cleared\n");
}

void BasePresenter::AddDetectionDataToCache(EVzResultDataType dataType, const SVzLaserLineData& laserData)
{
    std::lock_guard<std::mutex> lock(m_detectionDataMutex);
    m_detectionDataCache.push_back(std::make_pair(dataType, laserData));
}
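// ----------------------------------------------------------------------------
// Data-flow sketch (derived from the callbacks below and AlgoDetectThreadFunc):
//
//   camera SDK --> _StaticDetectionCallback     --> AddDetectionDataToCache()
//   camera SDK --> _StaticCameraStatusCallback  --(keDeviceWorkStatus_Device_Swing_Finish)-->
//                  m_algoDetectCondition.notify_one() --> AlgoDetectThreadFunc wakes
//                  --> DetectTask() --> ProcessAlgoDetection()
//
// A subclass that wants to run detection on demand (for example after filling
// the cache by some other means) could wake the worker the same way. This is
// only an illustrative snippet; "MyPresenter" and "TriggerDetection" are
// hypothetical names, not members of this code base:
//
//   void MyPresenter::TriggerDetection()
//   {
//       m_algoDetectCondition.notify_one();   // wakes AlgoDetectThreadFunc
//   }
// ----------------------------------------------------------------------------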
// Common static detection-data callback
void BasePresenter::_StaticDetectionCallback(EVzResultDataType eDataType, SVzLaserLineData* pLaserLinePoint, void* pUserData)
{
    // Validate the input parameters
    if (!pLaserLinePoint)
    {
        LOG_WARNING("[BasePresenter Detection Callback] pLaserLinePoint is null\n");
        return;
    }
    if (pLaserLinePoint->nPointCount <= 0)
    {
        LOG_WARNING("[BasePresenter Detection Callback] Point count is zero or negative: %d\n", pLaserLinePoint->nPointCount);
        return;
    }
    if (!pLaserLinePoint->p3DPoint)
    {
        LOG_WARNING("[BasePresenter Detection Callback] p3DPoint is null\n");
        return;
    }

    // Recover the BasePresenter instance
    BasePresenter* pThis = reinterpret_cast<BasePresenter*>(pUserData);
    if (!pThis)
    {
        LOG_ERROR("[BasePresenter Detection Callback] pUserData is null\n");
        return;
    }

    // Deep-copy the SVzLaserLineData; the copies stored in the cache own their buffers
    SVzLaserLineData lineData;
    memset(&lineData, 0, sizeof(SVzLaserLineData));

    // Allocate and copy the point data according to the data type
    if (eDataType == keResultDataType_Position)
    {
        // Copy SVzNL3DPosition data
        if (pLaserLinePoint->p3DPoint && pLaserLinePoint->nPointCount > 0)
        {
            lineData.p3DPoint = new SVzNL3DPosition[pLaserLinePoint->nPointCount];
            if (lineData.p3DPoint)
            {
                memcpy(lineData.p3DPoint, pLaserLinePoint->p3DPoint,
                       sizeof(SVzNL3DPosition) * pLaserLinePoint->nPointCount);
            }
            lineData.p2DPoint = new SVzNL2DPosition[pLaserLinePoint->nPointCount];
            if (lineData.p2DPoint)
            {
                memcpy(lineData.p2DPoint, pLaserLinePoint->p2DPoint,
                       sizeof(SVzNL2DPosition) * pLaserLinePoint->nPointCount);
            }
        }
    }
    else if (eDataType == keResultDataType_PointXYZRGBA)
    {
        // Copy SVzNLPointXYZRGBA data
        if (pLaserLinePoint->p3DPoint && pLaserLinePoint->nPointCount > 0)
        {
            lineData.p3DPoint = new SVzNLPointXYZRGBA[pLaserLinePoint->nPointCount];
            if (lineData.p3DPoint)
            {
                memcpy(lineData.p3DPoint, pLaserLinePoint->p3DPoint,
                       sizeof(SVzNLPointXYZRGBA) * pLaserLinePoint->nPointCount);
            }
            lineData.p2DPoint = new SVzNL2DLRPoint[pLaserLinePoint->nPointCount];
            if (lineData.p2DPoint)
            {
                memcpy(lineData.p2DPoint, pLaserLinePoint->p2DPoint,
                       sizeof(SVzNL2DLRPoint) * pLaserLinePoint->nPointCount);
            }
        }
    }

    // Copy the remaining fields
    lineData.nPointCount = pLaserLinePoint->nPointCount;
    lineData.llTimeStamp = pLaserLinePoint->llTimeStamp;
    lineData.llFrameIdx = pLaserLinePoint->llFrameIdx;
    lineData.nEncodeNo = pLaserLinePoint->nEncodeNo;
    lineData.fSwingAngle = pLaserLinePoint->fSwingAngle;
    lineData.bEndOnceScan = pLaserLinePoint->bEndOnceScan;

    // Add the copy to the detection data cache
    pThis->AddDetectionDataToCache(eDataType, lineData);
}

// Common static camera-status callback
void BasePresenter::_StaticCameraStatusCallback(EVzDeviceWorkStatus eStatus, void* pExtData, unsigned int nDataLength, void* pInfoParam)
{
    LOG_DEBUG("[BasePresenter Camera Status Callback] received: status=%d\n", (int)eStatus);

    // Recover the BasePresenter instance
    BasePresenter* pThis = reinterpret_cast<BasePresenter*>(pInfoParam);
    if (!pThis)
    {
        LOG_ERROR("[BasePresenter Camera Status Callback] pInfoParam is null\n");
        return;
    }

    switch (eStatus)
    {
    case EVzDeviceWorkStatus::keDeviceWorkStatus_Offline:
    {
        LOG_WARNING("[BasePresenter Camera Status Callback] Camera device offline/disconnected\n");
        // Update the connection flag
        pThis->m_bCameraConnected = false;
        // Notify the subclass (camera 1 is assumed here; a multi-camera setup may need to distinguish)
        pThis->OnCameraStatusChanged(1, false);
        break;
    }
    case EVzDeviceWorkStatus::keDeviceWorkStatus_Eye_Reconnect:
    {
        LOG_INFO("[BasePresenter Camera Status Callback] Camera device online/connected\n");
        // Update the connection flag
        pThis->m_bCameraConnected = true;
        // Notify the subclass
        pThis->OnCameraStatusChanged(1, true);
        break;
    }
    case EVzDeviceWorkStatus::keDeviceWorkStatus_Device_Swing_Finish:
    {
        LOG_INFO("[BasePresenter Camera Status Callback] Received scan finish signal from camera\n");
        // Wake the algorithm detection thread
        pThis->m_algoDetectCondition.notify_one();
        break;
    }
    default:
        break;
    }
}

void BasePresenter::StartCameraReconnectTimer()
{
    LOG_DEBUG("[BasePresenter] StartCameraReconnectTimer called\n");

    // Use QMetaObject::invokeMethod so the timer is started on the thread it lives in
    QMetaObject::invokeMethod(this, [this]() {
        if (!m_pCameraReconnectTimer)
        {
            LOG_ERROR("[BasePresenter] m_pCameraReconnectTimer is nullptr!\n");
            return;
        }
        if (m_pCameraReconnectTimer->isActive())
        {
            LOG_DEBUG("[BasePresenter] Camera reconnect timer is already running\n");
            return;
        }
        m_pCameraReconnectTimer->start();
        LOG_INFO("[BasePresenter] Camera reconnect timer started (interval: %d ms)\n",
                 m_pCameraReconnectTimer->interval());
    }, Qt::QueuedConnection);
}

void BasePresenter::StopCameraReconnectTimer()
{
    // Use QMetaObject::invokeMethod so the timer is stopped on the thread it lives in
    QMetaObject::invokeMethod(this, [this]() {
        if (m_pCameraReconnectTimer && m_pCameraReconnectTimer->isActive())
        {
            m_pCameraReconnectTimer->stop();
            LOG_INFO("[BasePresenter] Camera reconnect timer stopped\n");
        }
    }, Qt::QueuedConnection);
}

// ============ OnCameraReconnectTimer implementation ============
void BasePresenter::OnCameraReconnectTimer()
{
#ifdef _WIN32
    return;
#endif

    // Delegate the reconnect logic to the (possibly overridden) implementation
    bool allConnected = TryReconnectCameras();
    if (allConnected)
    {
        LOG_INFO("[BasePresenter] All cameras reconnected, stopping timer\n");
        StopCameraReconnectTimer();
    }
}
// ============ TryReconnectCameras default implementation ============
bool BasePresenter::TryReconnectCameras()
{
    LOG_DEBUG("[BasePresenter] TryReconnectCameras all %zu \n", m_expectedList.size());

    bool allConnected = true;
    int connectedCount = 0;

    // Walk all configured cameras and try to reconnect the ones that are down
    for (int i = 0; i < static_cast<int>(m_expectedList.size()); i++)
    {
        // Skip cameras that are already connected
        if (i < static_cast<int>(m_vrEyeDeviceList.size()) && m_vrEyeDeviceList[i].second != nullptr)
        {
            connectedCount++;
            continue;
        }

        // Try to reconnect this camera
        int cameraIndex = i + 1;  // camera IDs are 1-based
        const DeviceInfo& cameraInfo = m_expectedList[i];

        LOG_DEBUG("[BasePresenter] Trying to reconnect camera %d (%s, %s)\n",
                  cameraIndex, cameraInfo.name.c_str(), cameraInfo.ip.c_str());

        // Reopen via OpenDevice with the RGB/Swing settings captured at init time
        int nRet = OpenDevice(cameraIndex, cameraInfo.name.c_str(), cameraInfo.ip.c_str(), m_bRGB, m_bSwing);
        OnCameraStatusChanged(cameraIndex, SUCCESS == nRet);

        if (nRet == SUCCESS)
        {
            LOG_INFO("[BasePresenter] Camera %d (%s) reconnected\n", cameraIndex, cameraInfo.name.c_str());
            connectedCount++;
        }
        else
        {
            LOG_DEBUG("[BasePresenter] Camera %d (%s) reconnect failed, error code: %d\n",
                      cameraIndex, cameraInfo.name.c_str(), nRet);
            allConnected = false;
        }
    }

    // Update the overall connection flag
    m_bCameraConnected = (connectedCount > 0);

    // Update the default camera index to the first connected camera
    for (int i = 0; i < static_cast<int>(m_vrEyeDeviceList.size()); i++)
    {
        if (m_vrEyeDeviceList[i].second != nullptr)
        {
            m_currentCameraIndex = i + 1;
            break;
        }
    }

    LOG_INFO("[BasePresenter] Camera reconnect attempt finished: %d/%zu cameras connected\n",
             connectedCount, m_expectedList.size());

    return (connectedCount == static_cast<int>(m_expectedList.size()) && allConnected);
}
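// ----------------------------------------------------------------------------
// Subclass wiring sketch (illustrative only): a concrete presenter is expected
// to hand the common static callbacks back to the base class and implement the
// algorithm step. "WeldSeamPresenter" is a hypothetical class name, and the
// exact virtual signatures are assumptions based on how they are used above.
//
//   VzNL_AutoOutputLaserLineExCB WeldSeamPresenter::GetDetectionCallback()
//   {
//       return &BasePresenter::_StaticDetectionCallback;    // deep-copies each laser line into the cache
//   }
//
//   VzNL_OnNotifyStatusCBEx WeldSeamPresenter::GetCameraStatusCallback()
//   {
//       return &BasePresenter::_StaticCameraStatusCallback; // handles offline/reconnect/swing-finish
//   }
//
//   EVzResultDataType WeldSeamPresenter::GetDetectionDataType()
//   {
//       return keResultDataType_Position;                   // or keResultDataType_PointXYZRGBA
//   }
//
//   int WeldSeamPresenter::ProcessAlgoDetection(/* cached line data */)
//   {
//       // run the concrete algorithm over the cached scan lines
//       return SUCCESS;
//   }
// ----------------------------------------------------------------------------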