diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index 72220974de..162ed8a82b 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -119,7 +119,7 @@ set(SUB_DIRECTORIES_LIST
         file_server file_server/event file_server/event_handler file_server/event_listener file_server/reader file_server/polling
         prometheus prometheus/labels prometheus/schedulers prometheus/async
         ebpf ebpf/observer ebpf/security ebpf/handler
-        parser sls_control sdk fuse
+        parser sls_control sdk
         )
 if (LINUX)
     if (ENABLE_ENTERPRISE)
@@ -221,7 +221,6 @@ if (BUILD_LOGTAIL_SHARED_LIBRARY)
 endif ()
 
 # Generate independent libraries.
-# add_subdirectory(helper)
 add_subdirectory(go_pipeline)
 add_subdirectory(common)
 
diff --git a/core/checkpoint/AdhocCheckpointManager.cpp b/core/checkpoint/AdhocCheckpointManager.cpp
index 7b05f127cd..7b31530b39 100644
--- a/core/checkpoint/AdhocCheckpointManager.cpp
+++ b/core/checkpoint/AdhocCheckpointManager.cpp
@@ -20,7 +20,6 @@
 #include "logger/Logger.h"
 #include "monitor/LogtailAlarm.h"
 #include "common/Thread.h"
-#include "fuse/ulogfslib_file.h"
 #include "common/HashUtil.h"
 
 DEFINE_FLAG_INT32(adhoc_checkpoint_dump_thread_wait_interval, "microseconds", 5 * 1000);
diff --git a/core/common/HashUtil.cpp b/core/common/HashUtil.cpp
index 550216ced8..3b58726971 100644
--- a/core/common/HashUtil.cpp
+++ b/core/common/HashUtil.cpp
@@ -13,11 +13,14 @@
 // limitations under the License.
 
 #include "HashUtil.h"
+
+#include <cstring>
+
+#include <boost/functional/hash.hpp>
 
 #include "FileSystemUtil.h"
-#include "murmurhash3.h"
 #include "LogFileOperator.h"
-#include <boost/functional/hash.hpp>
+#include "murmurhash3.h"
 
 namespace logtail {
 
@@ -335,9 +338,9 @@ bool CheckAndUpdateSignature(const std::string& signature, uint64_t& sigHash, ui
     return rst;
 }
 
-bool CheckFileSignature(const std::string& filePath, uint64_t sigHash, uint32_t sigSize, bool fuseMode) {
+bool CheckFileSignature(const std::string& filePath, uint64_t sigHash, uint32_t sigSize) {
     LogFileOperator logFileOp;
-    logFileOp.Open(filePath.c_str(), fuseMode);
+    logFileOp.Open(filePath.c_str());
     if (!logFileOp.IsOpen()) {
         return false;
     }
@@ -369,10 +372,9 @@ int64_t HashSignatureString(const char* str, size_t strLen) {
    return *(int64_t*)hashVal;
 }
 
-void HashCombine(size_t &seed, size_t value) {
+void HashCombine(size_t& seed, size_t value) {
     boost::hash_combine(seed, value);
 }
-
 } // namespace logtail
diff --git a/core/common/HashUtil.h b/core/common/HashUtil.h
index 122f948b4c..1612ffeee8 100644
--- a/core/common/HashUtil.h
+++ b/core/common/HashUtil.h
@@ -27,11 +27,11 @@ void DoMd5(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t md5[16])
 bool SignatureToHash(const std::string& signature, uint64_t& sigHash, uint32_t& sigSize);
 bool CheckAndUpdateSignature(const std::string& signature, uint64_t& sigHash, uint32_t& sigSize);
-bool CheckFileSignature(const std::string& filePath, uint64_t sigHash, uint32_t sigSize, bool fuseMode = false);
+bool CheckFileSignature(const std::string& filePath, uint64_t sigHash, uint32_t sigSize);
 int64_t HashString(const std::string& str);
 int64_t HashSignatureString(const char* str, size_t strLen);
-void HashCombine(size_t &seed, size_t value);
+void HashCombine(size_t& seed, size_t value);
 
 } // namespace logtail
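For reference, HashCombine above is a thin wrapper over boost::hash_combine. A minimal sketch of how a caller might fold several values into one key; the (dev, inode) pair and its values are made up for illustration and are not part of this patch:

```cpp
#include <boost/functional/hash.hpp>

#include <cstddef>
#include <cstdint>
#include <iostream>

namespace logtail {
// Same wrapper as in HashUtil.cpp above.
void HashCombine(size_t& seed, size_t value) {
    boost::hash_combine(seed, value);
}
} // namespace logtail

int main() {
    // Hypothetical (dev, inode) pair folded into one hash seed.
    uint64_t dev = 2049, inode = 131842;
    size_t seed = 0;
    logtail::HashCombine(seed, static_cast<size_t>(dev));
    logtail::HashCombine(seed, static_cast<size_t>(inode));
    std::cout << seed << std::endl;
    return 0;
}
```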
"fuse/ulogfslib_file.h" namespace logtail { -int LogFileOperator::Open(const char* path, bool fuseMode) { +int LogFileOperator::Open(const char* path) { if (!path || IsOpen()) { return -1; } - mFuseMode = fuseMode; - if (mFuseMode) { - mFd = ulogfs_open(path); - } else { #if defined(_MSC_VER) - auto hFile = CreateFile(path, - GENERIC_READ, - FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - NULL, - OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL, - NULL); - if (INVALID_HANDLE_VALUE == hFile) { - return -1; - } - mFile = hFile; - // Might conflict, but can make sure that mFd >= 0. - mFd = (reinterpret_cast(hFile)) & std::numeric_limits::max(); + auto hFile = CreateFile(path, + GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL); + if (INVALID_HANDLE_VALUE == hFile) { + return -1; + } + mFile = hFile; + // Might conflict, but can make sure that mFd >= 0. + mFd = (reinterpret_cast(hFile)) & std::numeric_limits::max(); #else - mFd = open(path, O_RDONLY); + mFd = open(path, O_RDONLY); #endif - } return mFd; } @@ -56,34 +50,30 @@ int64_t LogFileOperator::Seek(int64_t offset, int whence) { if (!IsOpen()) { return -1; } - if (mFuseMode) { - return ulogfs_seek(mFd, offset, whence); - } else { #if defined(_MSC_VER) - switch (whence) { - case SEEK_CUR: - whence = FILE_CURRENT; - break; - case SEEK_SET: - whence = FILE_BEGIN; - break; - case SEEK_END: - whence = FILE_END; - break; - default: - return -1; - } - LARGE_INTEGER liPos; - liPos.QuadPart = offset; - LARGE_INTEGER liNewPos{0}; - if (FALSE == SetFilePointerEx(mFile, liPos, &liNewPos, whence)) { + switch (whence) { + case SEEK_CUR: + whence = FILE_CURRENT; + break; + case SEEK_SET: + whence = FILE_BEGIN; + break; + case SEEK_END: + whence = FILE_END; + break; + default: return -1; - } - return liNewPos.QuadPart; + } + LARGE_INTEGER liPos; + liPos.QuadPart = offset; + LARGE_INTEGER liNewPos{0}; + if (FALSE == SetFilePointerEx(mFile, liPos, &liNewPos, whence)) { + return -1; + } + return liNewPos.QuadPart; #else - return lseek(mFd, offset, whence); + return lseek(mFd, offset, whence); #endif - } } int LogFileOperator::Stat(fsutil::PathStat& ps) const { @@ -91,15 +81,11 @@ int LogFileOperator::Stat(fsutil::PathStat& ps) const { return -1; } - if (mFuseMode) { - return ulogfs_stat(mFd, ps.GetRawStat()); - } else { #if defined(_MSC_VER) - return fsutil::PathStat::fstat(mFile, ps) ? 0 : -1; + return fsutil::PathStat::fstat(mFile, ps) ? 0 : -1; #else - return fsutil::PathStat::fstat(mFd, ps) ? 0 : -1; + return fsutil::PathStat::fstat(mFd, ps) ? 
 #endif
-    }
 }
 
 int LogFileOperator::Pread(void* ptr, size_t size, size_t count, int64_t offset) {
@@ -107,54 +93,20 @@ int LogFileOperator::Pread(void* ptr, size_t size, size_t count, int64_t offset)
         return 0;
     }
 
-    if (mFuseMode) {
-        // datadir is NULL, ulogfs will get real datadir from env
-        return ulogfs_pread2(mFd, NULL, ptr, size * count, (off_t*)&offset);
-    } else {
 #if defined(_MSC_VER)
-        LARGE_INTEGER liPos;
-        liPos.QuadPart = offset;
-        if (FALSE == SetFilePointerEx(mFile, liPos, NULL, FILE_BEGIN)) {
-            return 0;
-        }
-        DWORD dwRead = 0;
-        if (FALSE == ::ReadFile(mFile, ptr, size * count, &dwRead, NULL)) {
-            return 0;
-        }
-        return static_cast<int>(dwRead);
-#else
-        return pread(mFd, ptr, size * count, offset);
-#endif
-    }
-}
-
-size_t LogFileOperator::SkipHoleRead(void* ptr, size_t size, size_t count, int64_t* offset) {
-    if (!mFuseMode || !ptr || !size || !count || !IsOpen()) {
+    LARGE_INTEGER liPos;
+    liPos.QuadPart = offset;
+    if (FALSE == SetFilePointerEx(mFile, liPos, NULL, FILE_BEGIN)) {
         return 0;
     }
-
-    int64_t off = *offset;
-    int nBytes = ulogfs_pread2(mFd, NULL, ptr, (int)(size * count), (off_t*)&off);
-    if (nBytes <= 0) {
-        return nBytes;
-    }
-
-    auto readBytes = (size_t)nBytes;
-
-    // if off == *offset, no hole no extra handle
-    // if off > *offset, there is a hole
-    if (off > *offset) {
-        if (off > *offset + nBytes) {
-            readBytes = 0;
-        } else {
-            readBytes = *offset + nBytes - off;
-            memmove(ptr, ((char*)ptr + (off - *offset)), readBytes);
-        }
-
-        *offset = off;
+    DWORD dwRead = 0;
+    if (FALSE == ::ReadFile(mFile, ptr, size * count, &dwRead, NULL)) {
+        return 0;
     }
-
-    return readBytes;
+    return static_cast<int>(dwRead);
+#else
+    return pread(mFd, ptr, size * count, offset);
+#endif
 }
 
 int64_t LogFileOperator::GetFileSize() const {
@@ -162,19 +114,15 @@ int64_t LogFileOperator::GetFileSize() const {
         return -1;
     }
 
-    if (mFuseMode) {
-        return static_cast<int64_t>(ulogfs_tell(mFd));
-    } else {
 #if defined(_MSC_VER)
-        LARGE_INTEGER liSize{0};
-        if (FALSE == GetFileSizeEx(mFile, &liSize)) {
-            return -1;
-        }
-        return static_cast<int64_t>(liSize.QuadPart);
+    LARGE_INTEGER liSize{0};
+    if (FALSE == GetFileSizeEx(mFile, &liSize)) {
+        return -1;
+    }
+    return static_cast<int64_t>(liSize.QuadPart);
 #else
-        return static_cast<int64_t>(lseek(mFd, 0, SEEK_END));
+    return static_cast<int64_t>(lseek(mFd, 0, SEEK_END));
 #endif
-    }
 }
 
 bool LogFileOperator::IsOpen() const {
@@ -187,16 +135,12 @@ int LogFileOperator::Close() {
     }
 
     int ret = 0;
-    if (mFuseMode) {
-        ret = ulogfs_close(mFd);
-    } else {
 #if defined(_MSC_VER)
-        ret = (TRUE == CloseHandle(mFile)) ? 0 : -1;
-        mFile = INVALID_HANDLE_VALUE;
+    ret = (TRUE == CloseHandle(mFile)) ? 0 : -1;
+    mFile = INVALID_HANDLE_VALUE;
 #else
-        ret = close(mFd);
+    ret = close(mFd);
 #endif
-    }
     mFd = -1;
     return ret;
 }
@@ -210,23 +154,19 @@ std::string LogFileOperator::GetFilePath() const {
         return "";
     }
 
-    if (mFuseMode) {
-        return GetFdPath(mFd);
-    } else {
 #if defined(_MSC_VER)
-        char filePath[MAX_PATH + 1];
-        auto ret = GetFinalPathNameByHandle(mFile, filePath, MAX_PATH + 1, VOLUME_NAME_DOS);
-        if (ret > MAX_PATH || ret <= 0) {
-            return "";
-        }
-        if (0 == memcmp(filePath, "\\\\?\\", 4)) {
-            return std::string(filePath + 4);
-        }
-        return std::string(filePath);
+    char filePath[MAX_PATH + 1];
+    auto ret = GetFinalPathNameByHandle(mFile, filePath, MAX_PATH + 1, VOLUME_NAME_DOS);
+    if (ret > MAX_PATH || ret <= 0) {
+        return "";
+    }
+    if (0 == memcmp(filePath, "\\\\?\\", 4)) {
+        return std::string(filePath + 4);
+    }
+    return std::string(filePath);
 #else
-        return GetFdPath(mFd);
+    return GetFdPath(mFd);
 #endif
-    }
 }
 
 } // namespace logtail
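A minimal usage sketch of the simplified API (illustrative only; DumpHead and the call pattern are hypothetical, not part of this patch): open read-only, read at an offset, and let the destructor close the file.

```cpp
#include <cstdio>

#include "common/LogFileOperator.h"

// Hypothetical helper: print the first bytes of a log file.
void DumpHead(const char* path) {
    logtail::LogFileOperator op;
    if (op.Open(path) < 0) { // the fuseMode argument is gone
        return;
    }
    char buf[256] = {0};
    // Pread(ptr, size, count, offset): positional read, no Seek needed.
    int n = op.Pread(buf, 1, sizeof(buf) - 1, 0);
    if (n > 0) {
        fwrite(buf, 1, n, stdout);
    }
    // ~LogFileOperator() invokes Close(), so no explicit cleanup is required.
}
```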
diff --git a/core/common/LogFileOperator.h b/core/common/LogFileOperator.h
index 19f5ccb513..8ff678ef57 100644
--- a/core/common/LogFileOperator.h
+++ b/core/common/LogFileOperator.h
@@ -35,12 +35,12 @@ namespace fsutil {
 
 class LogFileOperator {
 public:
-    LogFileOperator(bool fuseMode = false) : mFuseMode(fuseMode) {}
+    LogFileOperator() = default;
     ~LogFileOperator() { Close(); }
 
-    // @return file descriptor when fuseMode is enabled or on Linux.
-    //   An positve identifier is returned on Windows.
-    int Open(const char* path, bool fuseMode = false);
+    // @return file descriptor on Linux.
+    //   A positive identifier is returned on Windows.
+    int Open(const char* path);
 
     int64_t Seek(int64_t offset, int whence);
 
@@ -48,9 +48,6 @@ class LogFileOperator {
 
     int Pread(void* ptr, size_t size, size_t count, int64_t offset);
 
-    // For FUSE only.
-    size_t SkipHoleRead(void* ptr, size_t size, size_t count, int64_t* offset);
-
     // GetFileSize gets the size of current file.
     int64_t GetFileSize() const;
 
@@ -77,7 +74,6 @@ class LogFileOperator {
     HANDLE mFile = INVALID_HANDLE_VALUE;
 #endif
     int mFd = -1;
-    bool mFuseMode;
 
 #ifdef APSARA_UNIT_TEST_MAIN
     friend class LogFileOperatorUnittest;
diff --git a/core/ebpf/config.cpp b/core/ebpf/config.cpp
index 35c18ed749..026d37c70f 100644
--- a/core/ebpf/config.cpp
+++ b/core/ebpf/config.cpp
@@ -331,76 +331,18 @@ void InitSecurityNetworkFilter(const Json::Value& config,
     }
 }
 
-void FilterValidSecurityProbeCallName(SecurityProbeType type,
-                                      std::vector<std::string>& callNames,
-                                      std::string& errorMsg) {
-    if (type >= SecurityProbeType::MAX) {
-        errorMsg = "Invalid security eBPF probe type";
-        return;
-    }
-    std::vector<std::string> survivedCallNames;
-    bool allValid = true;
-    for (auto& callName : callNames) {
-        if (callNameDict.at(type).find(callName) == callNameDict.at(type).end()) {
-            if (!allValid) {
-                errorMsg += ", " + callName;
-            } else {
-                errorMsg = "Invalid callnames for security eBPF probe: " + callName;
-                allValid = false;
-            }
-        } else {
-            survivedCallNames.emplace_back(callName);
-        }
-    }
-    callNames.swap(survivedCallNames);
-}
-
 void GetSecurityProbeDefaultCallName(SecurityProbeType type, std::vector<std::string>& callNames) {
     callNames.assign(callNameDict.at(type).begin(), callNameDict.at(type).end());
 }
 
-void InitCallNameFilter(const Json::Value& config,
-                        std::vector<std::string>& callNames,
-                        const PipelineContext* mContext,
-                        const std::string& sName,
-                        SecurityProbeType probeType) {
-    std::string errorMsg;
-    // CallNameFilter (Optional)
-    if (!config.isMember("CallNameFilter")) {
-        // No CallNameFilter, use default callnames, no warning
-    } else if (!config["CallNameFilter"].isArray()) {
-        // CallNameFilter is not empty but of wrong type, use default callnames
-        errorMsg = "CallNameFilter is not of type list";
-    } else if (!GetOptionalListFilterParam<std::string>(config, "CallNameFilter", callNames, errorMsg)) {
-        // CallNameFilter has element of wrong type, use default callnames
-    } else {
-        FilterValidSecurityProbeCallName(probeType, callNames, errorMsg);
-        // If CallNameFilter contains valid callnames, use user defined callnames, otherwise use default callnames
-    }
-    if (!errorMsg.empty()) {
-        PARAM_WARNING_IGNORE(mContext->GetLogger(),
-                             mContext->GetAlarm(),
-                             errorMsg,
-                             sName,
-                             mContext->GetConfigName(),
-                             mContext->GetProjectName(),
-                             mContext->GetLogstoreName(),
-                             mContext->GetRegion());
-    }
-    // Use default callnames
-    if (callNames.empty()) {
-        GetSecurityProbeDefaultCallName(probeType, callNames);
-    }
-}
-
 bool CheckProbeConfigValid(const Json::Value& config, std::string& errorMsg) {
     errorMsg.clear();
     if (!config.isMember("ProbeConfig")) {
         // No ProbeConfig, use default, no warning
         return false;
-    } else if (!config["ProbeConfig"].isArray()) {
+    } else if (!config["ProbeConfig"].isObject()) {
         // ProbeConfig is not empty but of wrong type, use default
-        errorMsg = "ProbeConfig is not of type list, use probe config with default filter";
+        errorMsg = "ProbeConfig is not of type map, use probe config with default filter";
         return false;
     }
     return true;
 }
@@ -429,62 +371,39 @@ bool SecurityOptions::Init(SecurityProbeType probeType,
         mOptionList.emplace_back(thisSecurityOption);
         return true;
     }
-    std::unordered_set<std::string> thisCallNameSet;
-    for (auto& innerConfig : config["ProbeConfig"]) {
-        nami::SecurityOption thisSecurityOption;
-        // Genral Filter (Optional)
-        std::variant<std::monostate, nami::SecurityFileFilter, nami::SecurityNetworkFilter> thisFilter;
-        switch (probeType) {
-            case SecurityProbeType::FILE: {
-                nami::SecurityFileFilter thisFileFilter;
-                InitSecurityFileFilter(innerConfig, thisFileFilter, mContext, sName);
-                thisFilter.emplace<nami::SecurityFileFilter>(thisFileFilter);
-                break;
-            }
-            case SecurityProbeType::NETWORK: {
-                nami::SecurityNetworkFilter thisNetworkFilter;
-                InitSecurityNetworkFilter(innerConfig, thisNetworkFilter, mContext, sName);
-                thisFilter.emplace<nami::SecurityNetworkFilter>(thisNetworkFilter);
-                break;
-            }
-            case SecurityProbeType::PROCESS: {
-                break;
-            }
-            default:
-                PARAM_WARNING_IGNORE(mContext->GetLogger(),
-                                     mContext->GetAlarm(),
-                                     "Unknown security eBPF probe type",
-                                     sName,
-                                     mContext->GetConfigName(),
-                                     mContext->GetProjectName(),
-                                     mContext->GetLogstoreName(),
-                                     mContext->GetRegion());
-        }
-        // CallNameFilter (Optional)
-        std::vector<std::string> thisCallNames;
-        InitCallNameFilter(innerConfig, thisCallNames, mContext, sName, probeType);
-        // Check duplicate callnames and remove them
-        for (auto& callName : thisCallNames) {
-            if (thisCallNameSet.find(callName) == thisCallNameSet.end()) {
-                thisCallNameSet.insert(callName);
-                thisSecurityOption.call_names_.emplace_back(callName);
-            } else {
-                PARAM_WARNING_IGNORE(mContext->GetLogger(),
-                                     mContext->GetAlarm(),
-                                     "Duplicate callname " + callName + " is discarded",
-                                     sName,
-                                     mContext->GetConfigName(),
-                                     mContext->GetProjectName(),
-                                     mContext->GetLogstoreName(),
-                                     mContext->GetRegion());
-            }
-        }
-        // If callnames in this option are all duplicated, discard this option
-        if (!thisSecurityOption.call_names_.empty()) {
-            thisSecurityOption.filter_ = thisFilter;
-            mOptionList.emplace_back(thisSecurityOption);
-        }
-    }
+    auto innerConfig = config["ProbeConfig"];
+    nami::SecurityOption thisSecurityOption;
+    // General Filter (Optional)
+    std::variant<std::monostate, nami::SecurityFileFilter, nami::SecurityNetworkFilter> thisFilter;
+    switch (probeType) {
+        case SecurityProbeType::FILE: {
+            nami::SecurityFileFilter thisFileFilter;
+            InitSecurityFileFilter(innerConfig, thisFileFilter, mContext, sName);
+            thisFilter.emplace<nami::SecurityFileFilter>(thisFileFilter);
+            break;
+        }
+        case SecurityProbeType::NETWORK: {
+            nami::SecurityNetworkFilter thisNetworkFilter;
+            InitSecurityNetworkFilter(innerConfig, thisNetworkFilter, mContext, sName);
+            thisFilter.emplace<nami::SecurityNetworkFilter>(thisNetworkFilter);
+            break;
+        }
+        case SecurityProbeType::PROCESS: {
+            break;
+        }
+        default:
+            PARAM_WARNING_IGNORE(mContext->GetLogger(),
+                                 mContext->GetAlarm(),
+                                 "Unknown security eBPF probe type",
+                                 sName,
+                                 mContext->GetConfigName(),
+                                 mContext->GetProjectName(),
+                                 mContext->GetLogstoreName(),
+                                 mContext->GetRegion());
+    }
+    thisSecurityOption.filter_ = thisFilter;
+    GetSecurityProbeDefaultCallName(probeType, thisSecurityOption.call_names_);
+    mOptionList.emplace_back(thisSecurityOption);
     mProbeType = probeType;
     return true;
 }
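The ProbeConfig change above is behavioral: it must now be a single JSON object instead of a list, each probe type yields exactly one SecurityOption, and call names always fall back to the probe type's defaults. A sketch of a config that passes the new CheckProbeConfigValid; the FilePathFilter field name is an assumption used only for illustration:

```cpp
#include <iostream>
#include <sstream>
#include <string>

#include <json/json.h>

int main() {
    // Assumed shape of the new-style config; the old style was a list of such objects.
    const char* cfg = R"({
        "ProbeConfig": {
            "FilePathFilter": ["/etc/passwd", "/etc/shadow"]
        }
    })";
    Json::Value root;
    Json::CharReaderBuilder builder;
    std::istringstream iss(cfg);
    std::string errs;
    if (!Json::parseFromStream(builder, iss, &root, &errs)) {
        std::cerr << errs << std::endl;
        return 1;
    }
    // Mirrors the new CheckProbeConfigValid: an object passes, an array would now be rejected.
    std::cout << std::boolalpha << root["ProbeConfig"].isObject() << std::endl; // true
    return 0;
}
```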
diff --git a/core/file_server/EventDispatcher.cpp b/core/file_server/EventDispatcher.cpp
index e9d3abab82..fe41226b51 100644
--- a/core/file_server/EventDispatcher.cpp
+++ b/core/file_server/EventDispatcher.cpp
@@ -42,13 +42,13 @@
 #include "file_server/event/Event.h"
 #include "file_server/event_handler/EventHandler.h"
 #include "file_server/event_handler/LogInput.h"
-#include "protobuf/sls/metric.pb.h"
-#include "protobuf/sls/sls_logs.pb.h"
+#include "file_server/polling/PollingDirFile.h"
+#include "file_server/polling/PollingModify.h"
 #include "monitor/LogFileProfiler.h"
 #include "monitor/LogtailAlarm.h"
 #include "monitor/MetricExportor.h"
-#include "file_server/polling/PollingDirFile.h"
-#include "file_server/polling/PollingModify.h"
+#include "protobuf/sls/metric.pb.h"
+#include "protobuf/sls/sls_logs.pb.h"
 #ifdef APSARA_UNIT_TEST_MAIN
 #include "file_server/polling/PollingEventQueue.h"
 #endif
@@ -56,10 +56,10 @@
 #include "file_server/ConfigManager.h"
 #include "file_server/FileServer.h"
 #include "go_pipeline/LogtailPlugin.h"
-#include "plugin/input/InputContainerStdio.h"
-#include "plugin/input/InputFile.h"
 #include "pipeline/PipelineManager.h"
 #include "pipeline/plugin/PluginRegistry.h"
+#include "plugin/input/InputContainerStdio.h"
+#include "plugin/input/InputFile.h"
 
 using namespace std;
 using namespace sls_logs;
@@ -464,7 +464,7 @@ EventDispatcher::ValidateCheckpointResult EventDispatcher::validateCheckpoint(
     int wd = pathIter->second;
     DevInode devInode = GetFileDevInode(realFilePath);
     if (devInode.IsValid() && checkpoint->mDevInode.inode == devInode.inode) {
-        if (!CheckFileSignature(realFilePath, checkpoint->mSignatureHash, checkpoint->mSignatureSize, false)) {
+        if (!CheckFileSignature(realFilePath, checkpoint->mSignatureHash, checkpoint->mSignatureSize)) {
             LOG_INFO(sLogger,
                      ("delete checkpoint", "file device & inode remains the same but signature has changed")(
                          "config", checkpoint->mConfigName)("log reader queue name", checkpoint->mFileName)(
@@ -514,10 +514,8 @@ EventDispatcher::ValidateCheckpointResult EventDispatcher::validateCheckpoint(
         return ValidateCheckpointResult::kLogDirChanged;
     }
 
-    if (CheckFileSignature(PathJoin(path, findIter->second.mFileName),
-                           checkpoint->mSignatureHash,
-                           checkpoint->mSignatureSize,
-                           false)) {
+    if (CheckFileSignature(
+            PathJoin(path, findIter->second.mFileName), checkpoint->mSignatureHash, checkpoint->mSignatureSize)) {
         checkpoint->mRealFileName = PathJoin(findIter->second.mFileDir, findIter->second.mFileName);
         LOG_INFO(sLogger,
                  ("generate MODIFY event for file with checkpoint",
@@ -567,7 +565,7 @@ EventDispatcher::ValidateCheckpointResult EventDispatcher::validateCheckpoint(
         = SearchFilePathByDevInodeInDirectory(path, searchDepth, checkpoint->mDevInode, &cachePathDevInodeMap);
     if (searchResult) {
         const auto& newRealPath = searchResult.value();
-        if (CheckFileSignature(newRealPath, checkpoint->mSignatureHash, checkpoint->mSignatureSize, false)) {
+        if (CheckFileSignature(newRealPath, checkpoint->mSignatureHash, checkpoint->mSignatureSize)) {
             checkpoint->mRealFileName = newRealPath;
             LOG_INFO(sLogger,
                      ("generate MODIFY event for file with checkpoint",
diff --git a/core/file_server/event_handler/EventHandler.cpp b/core/file_server/event_handler/EventHandler.cpp
index a41c3652d9..22fdbe41ba 100644
--- a/core/file_server/event_handler/EventHandler.cpp
+++ b/core/file_server/event_handler/EventHandler.cpp
@@ -28,7 +28,6 @@
 #include "file_server/event/BlockEventManager.h"
 #include "file_server/ConfigManager.h"
 #include "file_server/FileServer.h"
-#include "fuse/FuseFileBlacklist.h"
 #include "logger/Logger.h"
 #include "monitor/LogtailAlarm.h"
 #include "runner/LogProcess.h"
diff --git a/core/file_server/event_handler/LogInput.cpp b/core/file_server/event_handler/LogInput.cpp
index c48d77a3cf..a5ea529bec 100644
--- a/core/file_server/event_handler/LogInput.cpp
+++ b/core/file_server/event_handler/LogInput.cpp
@@ -88,8 +88,8 @@ void LogInput::Start() {
     mInteruptFlag = false;
 
-    mGlobalOpenFdTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_OPEN_FD_TOTAL);
-    mGlobalRegisterHandlerTotal
+    mAgentOpenFdTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_OPEN_FD_TOTAL);
+    mAgentRegisterHandlerTotal
         = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_REGISTER_HANDLER_TOTAL);
 
     new Thread([this]() { ProcessLoop(); });
@@ -347,10 +347,10 @@ void LogInput::UpdateCriticalMetric(int32_t curTime) {
                                     1.0 * mEventProcessCount / (curTime - mLastUpdateMetricTime));
     int32_t openFdTotal =
GloablFileDescriptorManager::GetInstance()->GetOpenedFilePtrSize(); LogtailMonitor::GetInstance()->UpdateMetric("open_fd", openFdTotal); - mGlobalOpenFdTotal->Set(openFdTotal); + mAgentOpenFdTotal->Set(openFdTotal); size_t handlerCount = EventDispatcher::GetInstance()->GetHandlerCount(); LogtailMonitor::GetInstance()->UpdateMetric("register_handler", handlerCount); - mGlobalRegisterHandlerTotal->Set(handlerCount); + mAgentRegisterHandlerTotal->Set(handlerCount); LogtailMonitor::GetInstance()->UpdateMetric("reader_count", CheckPointManager::Instance()->GetReaderCount()); LogtailMonitor::GetInstance()->UpdateMetric("multi_config", AppConfig::GetInstance()->IsAcceptMultiConfig()); mEventProcessCount = 0; diff --git a/core/file_server/event_handler/LogInput.h b/core/file_server/event_handler/LogInput.h index 3940030190..21217086a3 100644 --- a/core/file_server/event_handler/LogInput.h +++ b/core/file_server/event_handler/LogInput.h @@ -79,8 +79,8 @@ class LogInput : public LogRunnable { volatile bool mIdleFlag; int32_t mEventProcessCount; int32_t mLastUpdateMetricTime; - IntGaugePtr mGlobalOpenFdTotal; - IntGaugePtr mGlobalRegisterHandlerTotal; + IntGaugePtr mAgentOpenFdTotal; + IntGaugePtr mAgentRegisterHandlerTotal; std::atomic_int mLastReadEventTime{0}; mutable std::mutex mThreadRunningMux; diff --git a/core/file_server/polling/PollingDirFile.cpp b/core/file_server/polling/PollingDirFile.cpp index 4905bcd8d9..2bc809de7c 100644 --- a/core/file_server/polling/PollingDirFile.cpp +++ b/core/file_server/polling/PollingDirFile.cpp @@ -68,10 +68,10 @@ static const int64_t NANO_CONVERTING = 1000000000; void PollingDirFile::Start() { ClearCache(); - mGlobalConfigTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_PIPELINE_CONFIG_TOTAL); - mGlobalPollingDirCacheSizeTotal + mAgentConfigTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_PIPELINE_CONFIG_TOTAL); + mAgentPollingDirCacheSizeTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL); - mGlobalPollingFileCacheSizeTotal + mAgentPollingFileCacheSizeTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL); mRuningFlag = true; mThreadPtr = CreateThread([this]() { Polling(); }); @@ -152,15 +152,15 @@ void PollingDirFile::Polling() { size_t configTotal = nameConfigMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("config_count", configTotal); - mGlobalConfigTotal->Set(configTotal); + mAgentConfigTotal->Set(configTotal); { ScopedSpinLock lock(mCacheLock); size_t pollingDirCacheSize = mDirCacheMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("polling_dir_cache", pollingDirCacheSize); - mGlobalPollingDirCacheSizeTotal->Set(pollingDirCacheSize); + mAgentPollingDirCacheSizeTotal->Set(pollingDirCacheSize); size_t pollingFileCacheSize = mFileCacheMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("polling_file_cache", pollingFileCacheSize); - mGlobalPollingFileCacheSizeTotal->Set(pollingFileCacheSize); + mAgentPollingFileCacheSizeTotal->Set(pollingFileCacheSize); } // Iterate all normal configs, make sure stat count will not exceed limit. diff --git a/core/file_server/polling/PollingDirFile.h b/core/file_server/polling/PollingDirFile.h index b9af8c8988..40a4ebfb8b 100644 --- a/core/file_server/polling/PollingDirFile.h +++ b/core/file_server/polling/PollingDirFile.h @@ -136,9 +136,9 @@ class PollingDirFile : public LogRunnable { // The sequence number of current round, uint64_t is used to avoid overflow. 
uint64_t mCurrentRound; - IntGaugePtr mGlobalConfigTotal; - IntGaugePtr mGlobalPollingDirCacheSizeTotal; - IntGaugePtr mGlobalPollingFileCacheSizeTotal; + IntGaugePtr mAgentConfigTotal; + IntGaugePtr mAgentPollingDirCacheSizeTotal; + IntGaugePtr mAgentPollingFileCacheSizeTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class PollingUnittest; diff --git a/core/file_server/polling/PollingModify.cpp b/core/file_server/polling/PollingModify.cpp index 4b6e4c0995..6dac3afad4 100644 --- a/core/file_server/polling/PollingModify.cpp +++ b/core/file_server/polling/PollingModify.cpp @@ -47,7 +47,7 @@ PollingModify::~PollingModify() { void PollingModify::Start() { ClearCache(); - mGlobalPollingModifySizeTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL); + mAgentPollingModifySizeTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL); mRuningFlag = true; mThreadPtr = CreateThread([this]() { Polling(); }); @@ -251,7 +251,7 @@ void PollingModify::Polling() { int32_t statCount = 0; size_t pollingModifySizeTotal = mModifyCacheMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("polling_modify_size", pollingModifySizeTotal); - mGlobalPollingModifySizeTotal->Set(pollingModifySizeTotal); + mAgentPollingModifySizeTotal->Set(pollingModifySizeTotal); for (auto iter = mModifyCacheMap.begin(); iter != mModifyCacheMap.end(); ++iter) { if (!mRuningFlag || mHoldOnFlag) break; diff --git a/core/file_server/polling/PollingModify.h b/core/file_server/polling/PollingModify.h index 9be9827ffc..c82337a898 100644 --- a/core/file_server/polling/PollingModify.h +++ b/core/file_server/polling/PollingModify.h @@ -101,7 +101,7 @@ class PollingModify : public LogRunnable { ModifyCheckCacheMap mModifyCacheMap; - IntGaugePtr mGlobalPollingModifySizeTotal; + IntGaugePtr mAgentPollingModifySizeTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class PollingUnittest; diff --git a/core/file_server/reader/LogFileReader.cpp b/core/file_server/reader/LogFileReader.cpp index 098eb176de..387c274c5f 100644 --- a/core/file_server/reader/LogFileReader.cpp +++ b/core/file_server/reader/LogFileReader.cpp @@ -28,7 +28,6 @@ #include #include -#include "file_server/reader/GloablFileDescriptorManager.h" #include "app_config/AppConfig.h" #include "checkpoint/CheckPointManager.h" #include "checkpoint/CheckpointManagerV2.h" @@ -41,20 +40,20 @@ #include "common/TimeUtil.h" #include "common/UUIDUtil.h" #include "file_server/ConfigManager.h" +#include "file_server/FileServer.h" #include "file_server/event/BlockEventManager.h" #include "file_server/event_handler/LogInput.h" -#include "file_server/FileServer.h" -#include "fuse/UlogfsHandler.h" +#include "file_server/reader/GloablFileDescriptorManager.h" +#include "file_server/reader/JsonLogFileReader.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "monitor/MetricConstants.h" -#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" #include "pipeline/queue/ExactlyOnceQueueManager.h" #include "pipeline/queue/ProcessQueueManager.h" #include "pipeline/queue/QueueKeyManager.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" #include "rapidjson/document.h" -#include "file_server/reader/JsonLogFileReader.h" #include "sdk/Common.h" using namespace sls_logs; @@ -478,8 +477,8 @@ bool LogFileReader::validatePrimaryCheckpoint(const PrimaryCheckpointPB& cpt) { filePath = newFilePath; return true; }; - if (CheckFileSignature(filePath, sigHash, 
sigSize, false) - || (hasFileBeenRotated() && CheckFileSignature(filePath, sigHash, sigSize, false))) { + if (CheckFileSignature(filePath, sigHash, sigSize) + || (hasFileBeenRotated() && CheckFileSignature(filePath, sigHash, sigSize))) { mLastFileSignatureSize = sigSize; mLastFileSignatureHash = sigHash; mRealLogPath = filePath; @@ -621,7 +620,7 @@ bool LogFileReader::CheckForFirstOpen(FileReadPolicy policy) { // we just want to set file pos, then a TEMPORARY object for LogFileOperator is needed here, not a class member // LogFileOperator we should open file via UpdateFilePtr, then start reading LogFileOperator op; - op.Open(mHostLogPath.c_str(), false); + op.Open(mHostLogPath.c_str()); if (op.IsOpen() == false) { mLastFilePos = 0; mCache.clear(); @@ -1062,7 +1061,7 @@ bool LogFileReader::UpdateFilePtr() { LOG_DEBUG(sLogger, ("UpdateFilePtr open log file ", mHostLogPath)); if (mRealLogPath.size() > 0) { while (tryTime++ < 5) { - mLogFileOp.Open(mRealLogPath.c_str(), false); + mLogFileOp.Open(mRealLogPath.c_str()); if (mLogFileOp.IsOpen() == false) { usleep(100); } else { @@ -1097,7 +1096,7 @@ bool LogFileReader::UpdateFilePtr() { } tryTime = 0; while (tryTime++ < 5) { - mLogFileOp.Open(mHostLogPath.c_str(), false); + mLogFileOp.Open(mHostLogPath.c_str()); if (mLogFileOp.IsOpen() == false) { usleep(100); } else { @@ -1923,7 +1922,7 @@ LogFileReader::ReadFile(LogFileOperator& op, void* buf, size_t size, int64_t& of LogFileReader::FileCompareResult LogFileReader::CompareToFile(const string& filePath) { LogFileOperator logFileOp; - logFileOp.Open(filePath.c_str(), false); + logFileOp.Open(filePath.c_str()); if (logFileOp.IsOpen() == false) { return FileCompareResult_Error; } @@ -2484,7 +2483,7 @@ void LogFileReader::UpdateReaderManual() { if (mLogFileOp.IsOpen()) { mLogFileOp.Close(); } - mLogFileOp.Open(mHostLogPath.c_str(), false); + mLogFileOp.Open(mHostLogPath.c_str()); mDevInode = GetFileDevInode(mHostLogPath); mRealLogPath = mHostLogPath; } diff --git a/core/fuse/FuseFileBlacklist.h b/core/fuse/FuseFileBlacklist.h deleted file mode 100644 index d90e329a53..0000000000 --- a/core/fuse/FuseFileBlacklist.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once -#include -#include -#include "common/Lock.h" -#include "common/DevInode.h" - -namespace logtail { - -class FuseFileBlacklist { -public: - static FuseFileBlacklist* GetInstance() { - static FuseFileBlacklist* ptr = new FuseFileBlacklist; - return ptr; - } - -private: - FuseFileBlacklist() {} - ~FuseFileBlacklist() {} - -public: - void AddToBlacklist(const std::string& filename) {} - void RemoveFromBlackList(const std::string& filename) {} - void RemoveFile() {} - -#ifdef APSARA_UNIT_TEST_MAIN - friend class FuseFileUnittest; -#endif -}; - -} // namespace logtail diff --git a/core/fuse/UlogfsHandler.h b/core/fuse/UlogfsHandler.h deleted file mode 100644 index e67a3d3643..0000000000 --- a/core/fuse/UlogfsHandler.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace logtail { - -struct FileInfo; - -class UlogfsHandler { -public: - static UlogfsHandler* GetInstance() { - static UlogfsHandler* ptr = new UlogfsHandler(); - return ptr; - } - - int Sparse(const FileInfo* fileInfo) { return 0; } - -private: - UlogfsHandler() {} - ~UlogfsHandler() {} - -#ifdef APSARA_UNIT_TEST_MAIN - friend class FuseFileUnittest; -#endif -}; - -} // namespace logtail diff --git a/core/fuse/ulogfslib_file.h b/core/fuse/ulogfslib_file.h deleted file mode 100644 index 08512098b8..0000000000 --- a/core/fuse/ulogfslib_file.h +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#if defined(_MSC_VER) -#include -typedef int64_t ssize_t; -#endif - -/** - * Open ulogfs file in readonly mode - * - * @param name - * The name of ulogfs file. - * @return - * The file descriptor of an opened ulogfs file, or negative value on error. - */ -int ulogfs_open_ro(const char* name); - - -/** - * Open ulogfs file in rw mode - * - * @param name - * The name of ulogfs file. - * @return - * The file descriptor of an opened ulogfs file, or a negative value on error. - */ -int ulogfs_open(const char* name); - - -/** - * Close ulogfs file - * - * @param fd - * The file descriptor of previously opened file. - * @return - * 0 on success, or a negative value on error. - */ -int ulogfs_close(int fd); - - -/** - * Read data from previously opened file with offset. - * - * @param fd - * The file descriptor of previously opened file. 
- * @param buf - * The memory where to hold the data. - * @param count - * The number of data in byte to read from file. - * @param offset - * The offset of file, would be modified if file was ftruncated. - * @return - * Positive or 0 on success indicates bytes read from file, - * negative value on error. - */ -ssize_t ulogfs_pread(int fd, void* buf, size_t count, off_t* offset); - - -/** - * 1/31/2019, after rewrite the pdflush mechanism, the data might be - * stored either in tmpfs or in datadir. we combine both parts of data - * together. - * - * 4/10/2019, if the caller didn't pass datadir parameter, we will read it - * from environment variales. - * - * Read data from previously opened file with offset, and combine both tmpfs and - * datadir parts of data together. - * - * @param fd - * The file descriptor of previously opened file. - * @param datadir - * the path of datadir directory, if the caller didn't pass datadir parameter, - * it will read from environment variales. - * @param buf - * The memory where to hold the data. - * @param count - * The number of data in byte to read from file. - * @param offset - * The offset of file, would be modified if file was ftruncated. - * @return - * Positive or 0 on success indicates bytes read from file, - * negative value on error. - */ -ssize_t ulogfs_pread2(int fd, const char* datadir, void* buf, size_t count, off_t* offset); - - -/** - * Reposition a fd. - * - * @param fd - * The file descriptor of previously opened file. - * @param offset - * The offset to whence. - * @param whence - * The position to stub. - * @return - * Positive or 0 on success, negative value on error. - */ -off_t ulogfs_seek(int fd, off_t offset, int whence); - - -/** - * Get stat info of file. - * - * @param fd - * The file descriptor of previously opened file. - * @param buf - * The memory where to hold the data. - * @return - * Positive or 0 on success, negative value on error. - */ -#if defined(__linux__) -int ulogfs_stat(int fd, struct stat* buf); -#elif defined(_MSC_VER) -int ulogfs_stat(int fd, struct _stat64* buf); -#endif - - -/** - * Get current file ptr offset. - * - * @param fd - * The file descriptor of previously opened file. - * @return - * Positive or 0 on success indicates size of file, - * negative value on error. - */ -int64_t ulogfs_tell(int fd); - -/** - * Inform ulogfs of data handled successfully. - * - * @param fd - * The file descriptor - * @param name - * The file name. - * @param offset - * The offset of buffer - * @param length - * The lenth of buffer - * @param force - * // - * @return - * 0 on success, or a negative value on error. - */ -int ulogfs_sparse(int fd, const char* name, off_t offset, int64_t length); - - -/** - * Inform ulogfs that the data of file can be fully flushed - * - * @param fd - * The file descriptor - * @param name - * the file name - * @return - * 0 on success, or a negative value on error. - */ -int ulogfs_fflush(int fd, const char* name); - -int ulogfs_fflush2(int fd, const char* name); - -/** - * Dump ulogfs inodes or files data - * - * @param type - * dump file or inode - * @param ino - * inode number, if you want to dump all inodes info, set ino to 0. - * @param buf - * data buffer - * @param len - * buffer length - * @return - * 0 on success, or a negative value on error. 
- */
-int ulogfs_dump(int type, long ino, void* buf, int64_t* len);
-
-/* send HELLO request to daemon and receive HELLO_REPLY */
-int ulogfs_hello();
-
-int ulogfs_upgrade();
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/core/fuse/ulogfslib_file_dummy.cpp b/core/fuse/ulogfslib_file_dummy.cpp
deleted file mode 100644
index 740d013329..0000000000
--- a/core/fuse/ulogfslib_file_dummy.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2022 iLogtail Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "ulogfslib_file.h"
-
-// Empty implementations of ulogfs.
-// TODO: Replace it with libulogfs.a
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-int ulogfs_open_ro(const char* name) {
-    return -1;
-}
-
-int ulogfs_open(const char* name) {
-    return -1;
-}
-
-int ulogfs_close(int fd) {
-    return -1;
-}
-
-ssize_t ulogfs_pread(int fd, void* buf, size_t count, off_t* offset) {
-    return -1;
-}
-
-ssize_t ulogfs_pread2(int fd, const char* datadir, void* buf, size_t count, off_t* offset) {
-    return -1;
-}
-
-off_t ulogfs_seek(int fd, off_t offset, int whence) {
-    return -1;
-}
-
-#if defined(__linux__)
-int ulogfs_stat(int fd, struct stat* buf) {
-    return -1;
-}
-#elif defined(_MSC_VER)
-int ulogfs_stat(int fd, struct _stat64* buf) {
-    return -1;
-}
-#endif
-
-int64_t ulogfs_tell(int fd) {
-    return -1;
-}
-
-int ulogfs_sparse(int fd, const char* name, off_t offset, int64_t length) {
-    return -1;
-}
-
-int ulogfs_fflush(int fd, const char* name) {
-    return -1;
-}
-
-int ulogfs_fflush2(int fd, const char* name) {
-    return -1;
-}
-
-int ulogfs_dump(int type, long ino, void* buf, int64_t* len) {
-    return -1;
-}
-
-int ulogfs_hello() {
-    return -1;
-}
-
-int ulogfs_upgrade() {
-    return -1;
-}
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/core/go_pipeline/LogtailPlugin.cpp b/core/go_pipeline/LogtailPlugin.cpp
index 26c9e64e4f..9b78fb5a1a 100644
--- a/core/go_pipeline/LogtailPlugin.cpp
+++ b/core/go_pipeline/LogtailPlugin.cpp
@@ -409,10 +409,10 @@ bool LogtailPlugin::LoadPluginBase() {
         LOG_ERROR(sLogger, ("load ProcessLogGroup error, Message", error));
         return mPluginValid;
     }
-    // Get statistics from the Go plugin side
-    mGetPipelineMetricsFun = (GetPipelineMetricsFun)loader.LoadMethod("GetPipelineMetrics", error);
+    // Get metrics from the Go side
+    mGetGoMetricsFun = (GetGoMetricsFun)loader.LoadMethod("GetGoMetrics", error);
     if (!error.empty()) {
-        LOG_ERROR(sLogger, ("load GetPipelineMetrics error, Message", error));
+        LOG_ERROR(sLogger, ("load GetGoMetrics error, Message", error));
         return mPluginValid;
     }
 
@@ -497,9 +497,12 @@ void LogtailPlugin::ProcessLogGroup(const std::string& configName,
     }
 }
 
-void LogtailPlugin::GetPipelineMetrics(std::vector<std::map<std::string, std::string>>& metircsList) {
-    if (mGetPipelineMetricsFun != nullptr) {
-        auto metrics = mGetPipelineMetricsFun();
+void LogtailPlugin::GetGoMetrics(std::vector<std::map<std::string, std::string>>& metricsList, const string& metricType) {
+    if (mGetGoMetricsFun != nullptr) {
+        GoString type;
+        type.n = metricType.size();
+        type.p = metricType.c_str();
+        auto metrics = mGetGoMetricsFun(type);
         if (metrics != nullptr) {
             for (int i = 0; i < metrics->count; ++i) {
                 std::map<std::string, std::string> item;
diff --git a/core/go_pipeline/LogtailPlugin.h b/core/go_pipeline/LogtailPlugin.h
index 990b35cf8f..ec59bc75e3 100644
--- a/core/go_pipeline/LogtailPlugin.h
+++ b/core/go_pipeline/LogtailPlugin.h
@@ -144,7 +144,7 @@ typedef GoInt (*InitPluginBaseV2Fun)(GoString cfg);
 typedef GoInt (*ProcessLogsFun)(GoString c, GoSlice l, GoString p, GoString t, GoSlice tags);
 typedef GoInt (*ProcessLogGroupFun)(GoString c, GoSlice l, GoString p);
 typedef struct innerContainerMeta* (*GetContainerMetaFun)(GoString containerID);
-typedef InnerPluginMetrics* (*GetPipelineMetricsFun)();
+typedef InnerPluginMetrics* (*GetGoMetricsFun)(GoString metricType);
 
 // Methods export by adapter.
 typedef int (*IsValidToSendFun)(long long logstoreKey);
@@ -266,7 +266,7 @@ class LogtailPlugin {
 
     K8sContainerMeta GetContainerMeta(const std::string& containerID);
 
-    void GetPipelineMetrics(std::vector<std::map<std::string, std::string>>& metircsList);
+    void GetGoMetrics(std::vector<std::map<std::string, std::string>>& metricsList, const std::string& metricType);
 
 private:
     void* mPluginBasePtr;
@@ -287,7 +287,7 @@ class LogtailPlugin {
     ProcessLogsFun mProcessLogsFun;
     ProcessLogGroupFun mProcessLogGroupFun;
     GetContainerMetaFun mGetContainerMetaFun;
-    GetPipelineMetricsFun mGetPipelineMetricsFun;
+    GetGoMetricsFun mGetGoMetricsFun;
 
     // Configuration for plugin system in JSON format.
     Json::Value mPluginCfg;
diff --git a/core/helper/CMakeLists.txt b/core/helper/CMakeLists.txt
deleted file mode 100644
index 91a15c3bc3..0000000000
--- a/core/helper/CMakeLists.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2022 iLogtail Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cmake_minimum_required(VERSION 3.22)
-project(helper)
-
-if (LINUX)
-    add_executable(LogtailInsight LogtailInsight.cpp LogtailInsight.h)
-    target_link_libraries(LogtailInsight log_pb common)
-    link_jsoncpp(LogtailInsight)
-endif ()
\ No newline at end of file
diff --git a/core/helper/LogtailInsight.cpp b/core/helper/LogtailInsight.cpp
deleted file mode 100644
index 8cdcfb71dd..0000000000
--- a/core/helper/LogtailInsight.cpp
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2022 iLogtail Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "LogtailInsight.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "protobuf/sls/sls_logs.pb.h" -#include "common/LogtailCommonFlags.h" -#include "common/TimeUtil.h" - -#ifdef ENABLE_COMPATIBLE_MODE -extern "C" { -#include -asm(".symver memcpy, memcpy@GLIBC_2.2.5"); -void* __wrap_memcpy(void* dest, const void* src, size_t n) { - return memcpy(dest, src, n); -} -} -#endif - -namespace logtail { - -LogtailInsight* LogtailInsight::s_instance = NULL; - -struct CmdHeader { - int32_t type; - int32_t len; -}; - -/* Create a client endpoint and connect to a server. Returns fd if all OK, <0 on error. */ -int unix_socket_conn(const char* servername) { - int fd; - if ((fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) /* create a UNIX domain stream socket */ - { - cout << "init unix socket error " << strerror(errno) << endl; - return -1; - } - if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) { - cout << "init unix socket error " << strerror(errno) << endl; - close(fd); - return (-1); - } - int rval; - struct sockaddr_un un; - bzero(&un, sizeof(un)); - un.sun_family = AF_UNIX; - strcpy(un.sun_path, servername); - if (connect(fd, (struct sockaddr*)&un, sizeof(un)) < 0) { - rval = -4; - } else { - return fd; - } - cout << "server : " << servername << " connect socket address error " << strerror(errno) << endl; - close(fd); - return rval; -} - -int RecvAndWait(int fd, char* data, int len, int waitSec) { - int bodySize = 0; - uint32_t beforeTime = time(NULL); - while (bodySize < len) { - int recvLen = recv(fd, data + bodySize, len - bodySize, MSG_DONTWAIT); - if (recvLen <= 0) { - if (errno == EAGAIN || errno == EWOULDBLOCK) { - if (time(NULL) - beforeTime > (uint32_t)waitSec) { - cout << "recv from fd timeout : " << strerror(errno) << ", rst : " << recvLen - << ", expect size : " << len << endl; - return -1; - } - usleep(10 * 1000); - } else { - cout << "recv from fd error, error : " << strerror(errno) << ", rst : " << recvLen - << ", expect size : " << len << endl; - return recvLen; - } - } else { - bodySize += recvLen; - } - } - return bodySize; -} - -int RecvLogGroup(int fd, sls_logs::LogGroup* logGroup) { - CmdHeader header; - int headerSize = RecvAndWait(fd, (char*)&header, sizeof(header), 2); - if (headerSize != 8) { - cout << "recv header error" << endl; - return -1; - } - if (header.type != 0x19 || header.len <= 0 || header.len > 100 * 1024 * 1024) { - cout << "recv error header, type " << header.type << ", len : " << header.len << endl; - return -1; - } - char* data = (char*)malloc(header.len); - int bodySize = RecvAndWait(fd, data, header.len, 1); - if (bodySize != header.len) { - cout << "recv body error" << endl; - free(data); - return -1; - } - if (!logGroup->ParseFromArray(data, bodySize)) { - cout << "parse log group from data error" << endl; - free(data); - return -1; - } - free(data); - return 0; -} - -void PrintLog(const sls_logs::LogGroup& logGroup, bool jsonOutPut, bool detail) { - if (jsonOutPut) { - Json::Value rootValue; - for (int i = 0; i < logGroup.logs_size(); ++i) { - Json::Value logValue; - const sls_logs::Log& log = logGroup.logs(i); - for (int j = 0; j < log.contents_size(); ++j) { - const sls_logs::Log_Content& content = log.contents(j); - logValue[content.key()] = content.value(); - } - rootValue[ToString(i)] = logValue; - } - cout << rootValue.toStyledString() << endl; - } else { - if (detail) { - for (int i = 0; i < logGroup.logs_size(); ++i) { - const sls_logs::Log& log = logGroup.logs(i); - for (int j = 0; j < 
log.contents_size(); ++j) { - const sls_logs::Log_Content& content = log.contents(j); - cout << content.key() << ": " << content.value() << endl; - } - cout << endl; - } - } else { - for (int i = 0; i < logGroup.logs_size(); ++i) { - const sls_logs::Log& log = logGroup.logs(i); - for (int j = 0; j < log.contents_size(); ++j) { - const sls_logs::Log_Content& content = log.contents(j); - if (content.key() == "isFinished") { - cout << content.value() << endl; - } - } - } - } - } -} - -void LogtailInsight::GetTimeStr(const string& allStr, string& timeStr) { - size_t leftPos = allStr.find('['); - size_t rightPos = allStr.find(']'); - if (leftPos != string::npos && rightPos != string::npos && leftPos < rightPos) { - timeStr = allStr.substr(leftPos + 1, rightPos - leftPos - 1); - } - mTimeString = timeStr; - mTimeValue = StringToDatetime(mTimeString); -} - -int LogtailInsight::ExecuteCommand(const string& serverAddress, - const string& cmdType, - const std::vector& paramList, - bool jsonOutPut, - bool detailFlag) { - int fd = unix_socket_conn(serverAddress.c_str()); - if (fd < 0) { - return -1; - } - sls_logs::Log cmdLog; - auto now = GetCurrentLogtailTime(); - SetLogTime(&cmdLog, now.tv_sec); - sls_logs::Log_Content* cmdTypeContent = cmdLog.add_contents(); - cmdTypeContent->set_key("type"); - cmdTypeContent->set_value(cmdType); - for (size_t i = 0; i < paramList.size(); ++i) { - sls_logs::Log_Content* cmdParam = cmdLog.add_contents(); - cmdParam->set_key("param"); - cmdParam->set_value(paramList[i]); - } - string cmdLogString = cmdLog.SerializePartialAsString(); - // send header - CmdHeader header; - header.type = 0x19; - header.len = cmdLogString.size(); - - send(fd, (const char*)(&header), sizeof(header), 0); - int sendSize = send(fd, cmdLogString.c_str(), cmdLogString.size(), 0); - if (sendSize < 0) { - close(fd); - cout << "send data to domain socker error , server : " << serverAddress << ", error : " << errno << endl; - return -1; - } - - sls_logs::LogGroup logGroup; - if (RecvLogGroup(fd, &logGroup) == 0) { - PrintLog(logGroup, jsonOutPut, detailFlag); - close(fd); - return 0; - } - close(fd); - return -1; -} - -void* LogtailInsight::ForcedExitTimer(void* arg) { - int waitMilliSeconds = (*(int*)arg) * 1000; - uint64_t startTimeMs = GetCurrentTimeInMilliSeconds(); - while (true) { - if (GetCurrentTimeInMilliSeconds() - startTimeMs > (uint64_t)waitMilliSeconds) { - exit(-1); - } - usleep(100 * 1000); - } - - return 0; -} - -LogtailInsight::LogtailInsight() { - mTimeValue = 0; -} - -LogtailInsight::~LogtailInsight() { -} - -} // namespace logtail - -int main(int argc, char* argv[]) { - if (argc < 3 || argv[1] != string("status")) { - cout << ("invalid param, use status -h for help.") << endl; - return 10; - } - - auto pInsight = logtail::LogtailInsight::GetInstance(); - - argv += 1; - argc -= 1; - - string opt = argv[1]; - string project; - string logstore; - string filename; - string index = "1"; - string endIndex; - - int rst = 0; - if (opt == "all") { - string index = "1"; - if (argc > 2) { - index = argv[2]; - } - rst = pInsight->InitStatus(index); - if (rst != 0) { - return rst; - } - cout << pInsight->GetLogtailStatus(); - } else if (opt == "detail") { - if (argc > 2) { - index = argv[2]; - } - rst = pInsight->InitStatus(index); - if (rst != 0) { - return rst; - } - cout << pInsight->GetLogtailDetail(); - } - - else if (opt == "active") { - bool listLogstore = true; - if (argc > 5 && string(argv[2]) == "--logfile") { - listLogstore = false; - index = argv[3]; - project = argv[4]; - 
logstore = argv[5]; - } else if (argc > 3 && string(argv[2]) == "--logstore") { - listLogstore = true; - index = argv[3]; - } else if (argc > 2) { - index = argv[2]; - } - - rst = pInsight->InitProfile(index); - if (rst != 0) { - return rst; - } - if (listLogstore) { - cout << pInsight->ListAllLogstore(); - } else { - cout << pInsight->listAllFiles(project, logstore); - } - } else if (opt == "logstore") { - if (argc < 5 || (argv[2][0] == '-' && argc < 6)) { - cout << ("invalid param, use -h for help.") << endl; - return 10; - } - int format = 0; - if (string(argv[2]).find("--format=") != string::npos) { - format = string(argv[2]).substr(9) == "json" ? 1 : 0; - argv += 1; - } - index = argv[2]; - project = argv[3]; - logstore = argv[4]; - rst = pInsight->InitProfile(index); - if (rst != 0) { - return rst; - } - if (!format) { - cout << pInsight->GetLogStoreProfileSimple(project, logstore); - } else { - cout << pInsight->GetLogStoreProfileJson(project, logstore); - } - } - - else if (opt == "logfile") { - if (argc < 6 || (argv[2][0] == '-' && argc < 7)) { - cout << ("invalid param, use -h for help.") << endl; - return 10; - } - int format = 0; - if (string(argv[2]).find("--format=") != string::npos) { - format = string(argv[2]).substr(9) == "json" ? 1 : 0; - argv += 1; - } - index = argv[2]; - project = argv[3]; - logstore = argv[4]; - filename = argv[5]; - rst = pInsight->InitProfile(index); - if (rst != 0) { - return rst; - } - if (!format) { - cout << pInsight->GetFileProfileSimple(project, logstore, filename); - } else { - cout << pInsight->GetFileProfileJson(project, logstore, filename); - } - } else if (opt == "history") { - if (argc < 6) { - cout << ("invalid param, use -h for help.") << endl; - return 10; - } - index = argv[2]; - endIndex = argv[3]; - project = argv[4]; - logstore = argv[5]; - if (argc > 6) { - filename = argv[6]; - return pInsight->QueryLogfileRange(project, logstore, filename, index, endIndex); - } else { - return pInsight->QueryLogstoreRange(project, logstore, index, endIndex); - } - } else if (opt == "command") { - if (argc < 3) { - cout << ("invalid param, use -h for help.") << endl; - return 10; - } - string cmdType = argv[2]; - vector cmdParamList; - string serverAddress("/tmp/logtail.sock"); - bool jsonOutFlag = false, detailFlag = false; - for (int i = 3; i < argc; ++i) { - string value(argv[i]); - const static string serverAddressParam = "--server-address="; - const static string formatJson = "--format=json"; - if (value.find(serverAddressParam) == 0) { - serverAddress = value.substr(serverAddressParam.size()); - } else if (formatJson == value) { - jsonOutFlag = true; - } else { - if (value == "detail") { - detailFlag = true; - } - cmdParamList.push_back(value); - } - } - - pthread_t tid; - int waitSeconds = 5; - pthread_create(&tid, NULL, logtail::LogtailInsight::ForcedExitTimer, (void*)&waitSeconds); - return pInsight->ExecuteCommand(serverAddress, cmdType, cmdParamList, jsonOutFlag, detailFlag); - } else if (opt == "--help" || opt == "-h" || opt == "help") { - cout << "logtail insight, version : 0.1.0\n\n"; - cout << "commond list :\n"; - cout << " status all [index] \n get logtail running status \n"; - cout << " status active [--logstore | --logfile] index [project] [logstore] \n list all " - "active logstore | logfile. if use --logfile, please add project and logstore. default --logstore\n"; - cout << " status logstore [--format=line | json] index project logstore \n get logstore " - "status with line or json style. 
default --format=line \n"; - cout << " status logfile [--format=line | json] index project logstore fileFullPath \n get " - "log file status with line or json style. default --format=line \n"; - cout << " status history beginIndex endIndex project logstore [fileFullPath] \n query " - "logstore | logfile history status. \n\n"; - cout << "index : from 1 to 60. in all, it means last $(index) minutes; in active/logstore/logfile/history, " - "it means last $(index)*10 minutes \n"; - } else { - cout << ("invalid param, use -h for help.") << endl; - rst = 10; - } - - return rst; -} diff --git a/core/helper/LogtailInsight.h b/core/helper/LogtailInsight.h deleted file mode 100644 index 9d46603996..0000000000 --- a/core/helper/LogtailInsight.h +++ /dev/null @@ -1,893 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "common/FileSystemUtil.h" -#include "common/RuntimeUtil.h" - -using namespace std; - -namespace logtail { - -struct LogStoreStatistic { - LogStoreStatistic(uint64_t readBytes = 0, - uint64_t skipBytes = 0, - uint64_t splitLines = 0, - uint64_t parseFailures = 0, - uint64_t regexMatchFailures = 0, - uint64_t parseTimeFailures = 0, - uint64_t historyFailures = 0, - uint64_t sendFailures = 0, - const std::string& errorLine = "") - : mReadBytes(readBytes), - mSkipBytes(skipBytes), - mSplitLines(splitLines), - mParseFailures(parseFailures), - mRegexMatchFailures(regexMatchFailures), - mParseTimeFailures(parseTimeFailures), - mHistoryFailures(historyFailures), - mSendFailures(sendFailures), - mErrorLine(errorLine) { - mLastUpdateTime = time(NULL); - mFileDev = 0; - mFileInode = 0; - mFileSize = 0; - mReadOffset = 0; - mLastReadTime = 0; - mReadCount = 0; - mReadDelayAvg = 0; - mMaxUnsendTime = 0; - mMinUnsendTime = 0; - mMaxSendSuccessTime = 0; - mSendQueueSize = 0; - mSendNetWorkErrorCount = 0; - mSendQuotaErrorCount = 0; - mSendDiscardErrorCount = 0; - mSendSuccessCount = 0; - mValidToSendFlag = false; - mSendBlockFlag = false; - } - - void Reset() { - mFileDev = 0; - mFileInode = 0; - mFileSize = 0; - mReadOffset = 0; - mLastReadTime = 0; - mReadCount = 0; - mReadDelayAvg = 0; - mReadBytes = 0; - mSkipBytes = 0; - mSplitLines = 0; - mParseFailures = 0; - mRegexMatchFailures = 0; - mParseTimeFailures = 0; - mHistoryFailures = 0; - mSendFailures = 0; - mErrorLine.clear(); - - mMaxUnsendTime = 0; - mMinUnsendTime = 0; - mMaxSendSuccessTime = 0; - mSendQueueSize = 0; - mSendNetWorkErrorCount = 0; - mSendQuotaErrorCount = 0; - mSendDiscardErrorCount = 0; - mSendSuccessCount = 0; - mValidToSendFlag = false; - mSendBlockFlag = false; - } - - void UpdateReadInfo(uint64_t dev, uint64_t inode, uint64_t fileSize, uint64_t readOffset, int32_t lastReadTime) { - mFileDev = dev; - mFileInode = inode; - mFileSize = fileSize; - mReadOffset = readOffset; - mLastReadTime = 
lastReadTime; - ++mReadCount; - mReadDelayAvg += fileSize > readOffset ? fileSize - readOffset : 0; - } - - std::string mProjectName; - std::string mCategory; - std::string mConfigName; - std::string mFilename; - // how many bytes processed - uint64_t mReadBytes; - // how many bytes skiped - uint64_t mSkipBytes; - // how many lines processed: mSplitLines - // how many lines parse failed: mParseFailures - // how many lines send failed: mSendFailures - // how many lines succeed send: mSplitLines - mParseFailures - mSendFailures - uint64_t mSplitLines; - // how many lines parse fails (include all failures) - uint64_t mParseFailures; - // how many lines regex match fail(include boost crash or not match) - uint64_t mRegexMatchFailures; - // how many lines parse timeformat fail - uint64_t mParseTimeFailures; - // how many lines history data discarded - uint64_t mHistoryFailures; - // how many lines send fails - uint64_t mSendFailures; - // one sample error line - std::string mErrorLine; - int32_t mLastUpdateTime; - - uint64_t mFileDev; - uint64_t mFileInode; - uint64_t mFileSize; - uint64_t mReadOffset; - int32_t mLastReadTime; - // ++mReadCount every call - uint32_t mReadCount; - // mReadDelaySum += mFileSize - mReadOffset every call - // then average delay is mReadDelaySum / mReadCount - uint64_t mReadDelayAvg; - - string mStatus; - - int32_t mMaxUnsendTime; - int32_t mMinUnsendTime; - int32_t mMaxSendSuccessTime; - uint32_t mSendQueueSize; - uint32_t mSendNetWorkErrorCount; - uint32_t mSendQuotaErrorCount; - uint32_t mSendDiscardErrorCount; - uint32_t mSendSuccessCount; - bool mSendBlockFlag; - bool mValidToSendFlag; -}; - -template -string ToString(const T& val) { - stringstream ss; - ss << val; - return ss.str(); -} - -template <> -string ToString(const bool& val) { - return val ? "true" : "false"; -} - -int min(int l, int r) { - return l <= r ? l : r; -} - -string ToWithString(string str, int with) { - int deltaSize = with - str.size(); - if (deltaSize > 0) { - string rst; - rst.append((size_t)deltaSize, ' '); - rst.append(str); - return rst; - } - return str; -} - -string ToHumanReadableByteCount(int64_t bytes) { - // Static lookup table of byte-based SI units - static const char* suffix[][2] = {{"B", "B"}, - {"kB", "KiB"}, - {"MB", "MiB"}, - {"GB", "GiB"}, - {"TB", "TiB"}, - {"EB", "EiB"}, - {"ZB", "ZiB"}, - {"YB", "YiB"}}; - int unit = 1024; - double coeff = 0.f; - int exp = 0; - char rst[32]; - if (bytes > 0) { - exp = min((int)(log(bytes) / log(unit)), (int)sizeof(suffix) / sizeof(suffix[0]) - 1); - } - coeff = bytes / pow(unit, exp); - if (exp == 0) { - sprintf(rst, "%d%s", (int)bytes, suffix[exp][0]); - } else { - sprintf(rst, "%.2f%s", coeff, suffix[exp][0]); - } - return rst; -} - -// Create by david zhang. 
2017/08/08 10:28:31 -class LogtailInsight { -public: - LogtailInsight(); - ~LogtailInsight(); - -public: - static LogtailInsight* GetInstance() { - if (s_instance == NULL) { - s_instance = new LogtailInsight; - } - return s_instance; - } - - static void FinalizeInstance() { - if (s_instance != NULL) { - delete s_instance; - s_instance = NULL; - } - } - - int ExecuteCommand(const string& serverAddress, - const string& cmdType, - const std::vector& paramList, - bool jsonOutPut, - bool detail); - static void* ForcedExitTimer(void* arg); - - int64_t JsonValueToInt64(const Json::Value& category) { - int64_t val = 0; - stringstream ss; - ss << category.asString(); - ss >> val; - return val; - } - - - void JsonToLogStoreStatistic(Json::Value& category, bool logstoreFlag) { - LogStoreStatistic* pStatistics = new LogStoreStatistic(); - if (category.isMember("project")) - pStatistics->mProjectName = category["project"].asString(); - if (category.isMember("logstore")) - pStatistics->mCategory = category["logstore"].asString(); - if (category.isMember("config_name")) - pStatistics->mConfigName = category["config_name"].asString(); - if (category.isMember("file")) { - pStatistics->mFilename = category["file"].asString(); - } - if (category.isMember("read_bytes")) - pStatistics->mReadBytes = JsonValueToInt64(category["read_bytes"]); - if (category.isMember("skip_bytes")) - pStatistics->mSkipBytes = JsonValueToInt64(category["skip_bytes"]); - if (category.isMember("split_lines")) - pStatistics->mSplitLines = JsonValueToInt64(category["split_lines"]); - if (category.isMember("parse_fail_lines")) - pStatistics->mParseFailures = JsonValueToInt64(category["parse_fail_lines"]); - if (category.isMember("file_dev")) - pStatistics->mFileDev = JsonValueToInt64(category["file_dev"]); - if (category.isMember("file_inode")) - pStatistics->mFileInode = JsonValueToInt64(category["file_inode"]); - if (category.isMember("last_read_time")) - pStatistics->mLastReadTime = JsonValueToInt64(category["last_read_time"]); - if (category.isMember("read_count")) - pStatistics->mReadCount = JsonValueToInt64(category["read_count"]); - if (category.isMember("file_size")) - pStatistics->mFileSize = JsonValueToInt64(category["file_size"]); - if (category.isMember("read_offset")) - pStatistics->mReadOffset = JsonValueToInt64(category["read_offset"]); - if (category.isMember("read_avg_delay")) - pStatistics->mReadDelayAvg = JsonValueToInt64(category["read_avg_delay"]); - if (category.isMember("max_unsend_time")) - pStatistics->mMaxUnsendTime = JsonValueToInt64(category["max_unsend_time"]); - if (category.isMember("min_unsend_time")) - pStatistics->mMinUnsendTime = JsonValueToInt64(category["min_unsend_time"]); - if (category.isMember("max_send_success_time")) - pStatistics->mMaxSendSuccessTime = JsonValueToInt64(category["max_send_success_time"]); - if (category.isMember("send_queue_size")) - pStatistics->mSendQueueSize = JsonValueToInt64(category["send_queue_size"]); - if (category.isMember("send_network_error")) - pStatistics->mSendNetWorkErrorCount = JsonValueToInt64(category["send_network_error"]); - if (category.isMember("send_quota_error")) - pStatistics->mSendQuotaErrorCount = JsonValueToInt64(category["send_quota_error"]); - if (category.isMember("send_discard_error")) - pStatistics->mSendDiscardErrorCount = JsonValueToInt64(category["send_discard_error"]); - if (category.isMember("send_success_count")) - pStatistics->mSendSuccessCount = JsonValueToInt64(category["send_success_count"]); - if 
(category.isMember("send_block_flag")) - pStatistics->mSendBlockFlag = category["send_block_flag"].asString() == "true"; - if (category.isMember("sender_valid_flag")) - pStatistics->mValidToSendFlag = category["sender_valid_flag"].asString() == "true"; - - pStatistics->mStatus = "ok"; - - if (logstoreFlag) { - if (pStatistics->mReadDelayAvg > 10 * 1024 * 104) { - pStatistics->mStatus = "process_block"; - } - if (pStatistics->mParseFailures > 0) { - pStatistics->mStatus = "parse_fail"; - } - if (pStatistics->mMinUnsendTime > 0 && mTimeValue - pStatistics->mMinUnsendTime > 10) { - pStatistics->mStatus = "send_block"; - } - if (pStatistics->mMinUnsendTime > 0 && mTimeValue - pStatistics->mMinUnsendTime > 60) { - pStatistics->mStatus = "sender_invalid"; - } - if (pStatistics->mValidToSendFlag == false) { - pStatistics->mStatus = "sender_invalid"; - } - - // cout << "add, project :" << pStatistics->mProjectName << " logstore :" << pStatistics->mCategory << endl; - mLogstoreProfileMap[pStatistics->mProjectName + '#' + pStatistics->mCategory] = pStatistics; - } else { - if (pStatistics->mReadDelayAvg > 10 * 1024 * 104) { - pStatistics->mStatus = "process_block"; - } - if (pStatistics->mParseFailures > 0) { - pStatistics->mStatus = "parse_fail"; - } - // cout << "add, project :" << pStatistics->mProjectName << " logstore :" << pStatistics->mCategory << " - // file :" << pStatistics->mFilename << endl; - mFileProfileMap[pStatistics->mProjectName + '#' + pStatistics->mCategory + '#' + pStatistics->mFilename] - = pStatistics; - } - } - - bool StringToJson(const string& fileContent, Json::Value& valueJson) { - Json::CharReaderBuilder builder; - builder["collectComments"] = false; - std::unique_ptr jsonReader(builder.newCharReader()); - std::string jsonParseErrs; - if (!jsonReader->parse( - fileContent.data(), fileContent.data() + fileContent.size(), &valueJson, &jsonParseErrs)) { - cout << "Parse json error: " << jsonParseErrs << std::endl; - return false; - } - return true; - } - - string GetProcessExecutionDir(void) { - char exePath[PATH_MAX + 1] = ""; - readlink("/proc/self/exe", exePath, sizeof(exePath)); - string fullPath(exePath); - size_t index = fullPath.rfind("/"); - if (index == string::npos) { - return string(); - } - return fullPath.substr(0, index + 1); - } - - time_t StringToDatetime(string str) { - char* cha = (char*)str.data(); - tm tm_; - int year = 1900; - int month = 1; - int day = 0; - int hour = 0; - int minute = 0; - int second = 0; - if (6 != sscanf(cha, "%d-%d-%d %d:%d:%d", &year, &month, &day, &hour, &minute, &second)) { - cout << string("cast time fail, " + str) << endl; - return 0; - } - tm_.tm_year = year - 1900; - tm_.tm_mon = month - 1; - tm_.tm_mday = day; - tm_.tm_hour = hour; - tm_.tm_min = minute; - tm_.tm_sec = second; - tm_.tm_isdst = 0; - time_t t_ = mktime(&tm_); - return t_; - } - - string DateTimeToString(time_t timeVal) { - struct tm time_info; - char timeString[32]; // space for "HH:MM:SS\0" - - localtime_r(&timeVal, &time_info); - - strftime(timeString, sizeof(timeString), "%y-%m-%d %H:%M:%S", &time_info); - return timeString; - } - - int StringToStatus(const string& fileContent) { - stringstream ss(fileContent); - while (!ss.eof()) { - string line; - string key; - string val; - getline(ss, line); - if (line.size() == 0) - continue; - if (line[0] == '#') { - continue; - } - size_t sept = line.find(':'); - if (sept == string::npos || sept == line.size() - 1) { - continue; - } - - key = line.substr(0, sept); - val = line.substr(sept + 1); - mStatusMap[key] = 
val; - // cout << "key value : " << key << val << endl; - } - return 0; - } - - string GetLogtailStatus() { - return mStatusMap.find("status") != mStatusMap.end() ? mStatusMap["status"] : string("UnknowError"); - } - - - string GetLogtailDetail() { - string rstStr; - for (map::iterator iter = mStatusMap.begin(); iter != mStatusMap.end(); ++iter) { - rstStr += iter->first + " : " + iter->second + "\n"; - } - rstStr += string("time_readable") + " : " + mTimeString + "\n"; - rstStr += string("time") + " : " + ToString(mTimeValue); - return rstStr; - } - - string ListAllLogstore() { - string rst; - map::iterator iter = mLogstoreProfileMap.begin(); - for (; iter != mLogstoreProfileMap.end(); ++iter) { - rst += iter->second->mProjectName + " : " + iter->second->mCategory + "\n"; - } - return rst; - } - - string listAllFiles(string project, string logstore) { - string rst; - map::iterator iter = mFileProfileMap.begin(); - for (; iter != mFileProfileMap.end(); ++iter) { - if (iter->second->mProjectName == project && iter->second->mCategory == logstore) { - rst += iter->second->mFilename + "\n"; - } - } - return rst; - } - - string GetLogStoreProfileJson(string project, string logstore) { - Json::Value rstValue; - string key = project + '#' + logstore; - map::iterator iter = mLogstoreProfileMap.find(key); - if (iter == mLogstoreProfileMap.end()) { - // rstValue["find"] = Json::Value(false); - return rstValue.toStyledString(); - } - LogStoreStatistic* pStatistics = iter->second; - // rstValue["find"] = Json::Value(true); - rstValue["time_begin_readable"] = Json::Value(DateTimeToString(mTimeValue - 600)); - rstValue["time_end_readable"] = Json::Value(DateTimeToString(mTimeValue)); - rstValue["time_begin"] = Json::Int(mTimeValue - 600); - rstValue["time_end"] = Json::Value(mTimeValue); - - rstValue["project"] = Json::Value(pStatistics->mProjectName); - rstValue["logstore"] = Json::Value(pStatistics->mCategory); - rstValue["config"] = Json::Value(pStatistics->mConfigName); - rstValue["read_bytes"] = Json::Int64(pStatistics->mReadBytes); - rstValue["parse_success_lines"] = Json::Int64(pStatistics->mSplitLines); - rstValue["parse_fail_lines"] = Json::Int64(pStatistics->mParseFailures); - rstValue["last_read_time"] = Json::Int64(pStatistics->mLastReadTime); - rstValue["read_count"] = Json::Int64(pStatistics->mReadCount); - rstValue["avg_delay_bytes"] = Json::Int64(pStatistics->mReadDelayAvg); - - rstValue["max_unsend_time"] = Json::Int64(pStatistics->mMaxUnsendTime); - rstValue["min_unsend_time"] = Json::Int64(pStatistics->mMinUnsendTime); - rstValue["max_send_success_time"] = Json::Int64(pStatistics->mMaxSendSuccessTime); - rstValue["send_queue_size"] = Json::Int64(pStatistics->mSendQueueSize); - rstValue["send_network_error_count"] = Json::Int64(pStatistics->mSendNetWorkErrorCount); - rstValue["send_network_quota_count"] = Json::Int64(pStatistics->mSendQuotaErrorCount); - rstValue["send_network_discard_count"] = Json::Int64(pStatistics->mSendDiscardErrorCount); - rstValue["send_success_count"] = Json::Int64(pStatistics->mSendSuccessCount); - rstValue["send_block_flag"] = Json::Value(pStatistics->mSendBlockFlag); - rstValue["sender_valid_flag"] = Json::Value(pStatistics->mValidToSendFlag); - rstValue["status"] = Json::Value(pStatistics->mStatus); - - - return rstValue.toStyledString(); - } - - string GetLogStoreProfileSimple(string project, string logstore) { - string key = project + '#' + logstore; - map::iterator iter = mLogstoreProfileMap.find(key); - if (iter == mLogstoreProfileMap.end()) { - return 
""; - // return "find : false"; - } - LogStoreStatistic* pStatistics = iter->second; - // string rstStr = "find : true"; - string rstStr = "time_begin_readable : " + DateTimeToString(mTimeValue - 600); - rstStr += "\ntime_end_readable : " + DateTimeToString(mTimeValue); - rstStr += "\ntime_begin : " + ToString(mTimeValue - 600); - rstStr += "\ntime_end : " + ToString(mTimeValue); - rstStr += "\nproject : " + ToString(pStatistics->mProjectName); - rstStr += "\nlogstore : " + ToString(pStatistics->mCategory); - rstStr += "\nstatus : " + ToString(pStatistics->mStatus); - rstStr += "\nconfig : " + ToString(pStatistics->mConfigName); - rstStr += "\nread_bytes : " + ToString(pStatistics->mReadBytes); - rstStr += "\nparse_success_lines : " + ToString(pStatistics->mSplitLines); - rstStr += "\nparse_fail_lines : " + ToString(pStatistics->mParseFailures); - rstStr += "\nlast_read_time : " + ToString(pStatistics->mLastReadTime); - rstStr += "\nread_count : " + ToString(pStatistics->mReadCount); - rstStr += "\navg_delay_bytes : " + ToString(pStatistics->mReadDelayAvg); - rstStr += "\nmax_unsend_time : " + ToString(pStatistics->mMaxUnsendTime); - rstStr += "\nmin_unsend_time : " + ToString(pStatistics->mMinUnsendTime); - rstStr += "\nmax_send_success_time : " + ToString(pStatistics->mMaxSendSuccessTime); - rstStr += "\nsend_queue_size : " + ToString(pStatistics->mSendQueueSize); - rstStr += "\nsend_network_error_count : " + ToString(pStatistics->mSendNetWorkErrorCount); - rstStr += "\nsend_network_quota_count : " + ToString(pStatistics->mSendQuotaErrorCount); - rstStr += "\nsend_network_discard_count : " + ToString(pStatistics->mSendDiscardErrorCount); - rstStr += "\nsend_success_count : " + ToString(pStatistics->mSendSuccessCount); - rstStr += "\nsend_block_flag : " + ToString(pStatistics->mSendBlockFlag); - rstStr += "\nsender_valid_flag : " + ToString(pStatistics->mValidToSendFlag); - - return rstStr; - } - - string GetLogStoreProfileOneLine(string project, string logstore) { - string key = project + '#' + logstore; - map::iterator iter = mLogstoreProfileMap.find(key); - if (iter == mLogstoreProfileMap.end()) { - return ""; - // return "find : false"; - } - LogStoreStatistic* pStatistics = iter->second; - // string rstStr = "find : true"; - string rstStr = ToWithString(DateTimeToString(mTimeValue - 600), 18); - rstStr += " " + ToWithString(pStatistics->mStatus, 14); - rstStr += " " + ToWithString(ToHumanReadableByteCount(pStatistics->mReadBytes), 8); - rstStr += " " + ToWithString(ToString(pStatistics->mSplitLines), 13); - rstStr += " " + ToWithString(ToString(pStatistics->mParseFailures), 10); - rstStr += " " + ToWithString(DateTimeToString(pStatistics->mLastReadTime), 18); - rstStr += " " + ToWithString(ToString(pStatistics->mReadCount), 10); - rstStr += " " + ToWithString(ToHumanReadableByteCount(pStatistics->mReadDelayAvg), 9); - rstStr += " " + ToWithString(ToString(pStatistics->mSendQueueSize), 10); - rstStr += " " + ToWithString(ToString(pStatistics->mSendNetWorkErrorCount), 13); - rstStr += " " + ToWithString(ToString(pStatistics->mSendQuotaErrorCount), 11); - rstStr += " " + ToWithString(ToString(pStatistics->mSendDiscardErrorCount), 13); - rstStr += " " + ToWithString(ToString(pStatistics->mSendSuccessCount), 12); - rstStr += " " + ToWithString(ToString(pStatistics->mSendBlockFlag), 10); - rstStr += " " + ToWithString(ToString(pStatistics->mValidToSendFlag), 10); - rstStr += " " + ToWithString(DateTimeToString(pStatistics->mMaxUnsendTime), 18); - rstStr += " " + 
ToWithString(DateTimeToString(pStatistics->mMinUnsendTime), 18); - rstStr += " " + ToWithString(DateTimeToString(pStatistics->mMaxSendSuccessTime), 18) + "\n"; - - return rstStr; - } - - - string GetLogStoreProfileHeader() { - string rstStr = " begin_time"; // 18 - rstStr += " status"; // 12 - rstStr += " read"; // 8 - rstStr += " parse_success"; // 13 - rstStr += " parse_fail"; // 10 - rstStr += " last_read_time"; // 18 - rstStr += " read_count"; // 10 - rstStr += " avg_delay"; // 9 - rstStr += " send_queue"; // 10 - rstStr += " network_error"; // 13 - rstStr += " quota_error"; // 11 - rstStr += " discard_error"; // 13 - rstStr += " send_success"; // 12 - rstStr += " send_block"; // 10 - rstStr += " send_valid"; // 10 - rstStr += " max_unsend"; // 18 - rstStr += " min_unsend"; // 18 - rstStr += " max_send_success\n"; // 18 - return rstStr; - } - - string GetFileProfileJson(string project, string logstore, string fileName) { - Json::Value rstValue; - string key = project + '#' + logstore + '#' + fileName; - map::iterator iter = mFileProfileMap.find(key); - if (iter == mFileProfileMap.end()) { - // rstValue["find"] = Json::Value(false); - return rstValue.toStyledString(); - } - LogStoreStatistic* pStatistics = iter->second; - // rstValue["find"] = Json::Value(true); - rstValue["time_begin_readable"] = Json::Value(DateTimeToString(mTimeValue - 600)); - rstValue["time_end_readable"] = Json::Value(DateTimeToString(mTimeValue)); - rstValue["time_begin"] = Json::Int(mTimeValue - 600); - rstValue["time_end"] = Json::Value(mTimeValue); - - rstValue["project"] = Json::Value(pStatistics->mProjectName); - rstValue["logstore"] = Json::Value(pStatistics->mCategory); - rstValue["status"] = Json::Value(pStatistics->mStatus); - rstValue["config"] = Json::Value(pStatistics->mConfigName); - rstValue["file_path"] = Json::Value(pStatistics->mFilename); - rstValue["read_bytes"] = Json::Int64(pStatistics->mReadBytes); - rstValue["parse_success_lines"] = Json::Int64(pStatistics->mSplitLines); - rstValue["parse_fail_lines"] = Json::Int64(pStatistics->mParseFailures); - rstValue["last_read_time"] = Json::Int64(pStatistics->mLastReadTime); - rstValue["file_dev"] = Json::Int64(pStatistics->mFileDev); - rstValue["file_inode"] = Json::Int64(pStatistics->mFileInode); - rstValue["file_size_bytes"] = Json::Int64(pStatistics->mFileSize); - rstValue["read_offset_bytes"] = Json::Int64(pStatistics->mReadOffset); - rstValue["read_count"] = Json::Int64(pStatistics->mReadCount); - rstValue["avg_delay_bytes"] = Json::Int64(pStatistics->mReadDelayAvg); - - return rstValue.toStyledString(); - } - - string GetFileProfileSimple(string project, string logstore, string fileName) { - string key = project + '#' + logstore + '#' + fileName; - map::iterator iter = mFileProfileMap.find(key); - if (iter == mFileProfileMap.end()) { - return ""; - // return "find : false"; - } - LogStoreStatistic* pStatistics = iter->second; - // string rstStr = "find : true"; - - string rstStr = "time_begin_readable : " + DateTimeToString(mTimeValue - 600); - rstStr += "\ntime_end_readable : " + DateTimeToString(mTimeValue); - rstStr += "\ntime_begin : " + ToString(mTimeValue - 600); - rstStr += "\ntime_end : " + ToString(mTimeValue); - rstStr += "\nproject : " + ToString(pStatistics->mProjectName); - rstStr += "\nlogstore : " + ToString(pStatistics->mCategory); - rstStr += "\nstatus : " + ToString(pStatistics->mStatus); - rstStr += "\nconfig : " + ToString(pStatistics->mConfigName); - rstStr += "\nfile_path : " + ToString(pStatistics->mFilename); - rstStr 
+= "\nfile_dev : " + ToString(pStatistics->mFileDev); - rstStr += "\nfile_inode : " + ToString(pStatistics->mFileInode); - rstStr += "\nfile_size_bytes : " + ToString(pStatistics->mFileSize); - rstStr += "\nfile_offset_bytes : " + ToString(pStatistics->mReadOffset); - rstStr += "\nread_bytes : " + ToString(pStatistics->mReadBytes); - rstStr += "\nparse_success_lines : " + ToString(pStatistics->mSplitLines); - rstStr += "\nparse_fail_lines : " + ToString(pStatistics->mParseFailures); - rstStr += "\nlast_read_time : " + ToString(pStatistics->mLastReadTime); - rstStr += "\nread_count : " + ToString(pStatistics->mReadCount); - rstStr += "\navg_delay_bytes : " + ToString(pStatistics->mReadDelayAvg); - rstStr += "\nread_offset_bytes : " + ToString(pStatistics->mReadOffset); - - - return rstStr; - } - - string GetFileProfileOneLine(string project, string logstore, string fileName) { - string key = project + '#' + logstore + '#' + fileName; - map::iterator iter = mFileProfileMap.find(key); - if (iter == mFileProfileMap.end()) { - return ""; - // return "find : false"; - } - LogStoreStatistic* pStatistics = iter->second; - // string rstStr = "find : true"; - string rstStr = ToWithString(DateTimeToString(mTimeValue - 600), 18); - rstStr += " " + ToWithString(pStatistics->mStatus, 14); - rstStr += " " + ToWithString(ToHumanReadableByteCount(pStatistics->mReadBytes), 8); - rstStr += " " + ToWithString(ToString(pStatistics->mSplitLines), 13); - rstStr += " " + ToWithString(ToString(pStatistics->mParseFailures), 10); - rstStr += " " + ToWithString(DateTimeToString(pStatistics->mLastReadTime), 18); - rstStr += " " + ToWithString(ToString(pStatistics->mReadCount), 10); - rstStr += " " + ToWithString(ToHumanReadableByteCount(pStatistics->mReadDelayAvg), 9); - rstStr += " " + ToWithString(ToString(pStatistics->mFileDev), 8); - rstStr += " " + ToWithString(ToString(pStatistics->mFileInode), 8); - rstStr += " " + ToWithString(ToHumanReadableByteCount(pStatistics->mFileSize), 9); - rstStr += " " + ToWithString(ToHumanReadableByteCount(pStatistics->mReadOffset), 11) + "\n"; - - return rstStr; - } - - string GetFileProfileHeader() { - string rstStr = " begin_time"; // 18 - rstStr += " status"; // 12 - rstStr += " read"; // 8 - rstStr += " parse_success"; // 13 - rstStr += " parse_fail"; // 10 - rstStr += " last_read_time"; // 18 - rstStr += " read_count"; // 10 - rstStr += " avg_delay"; // 9 - rstStr += " device"; // 8 - rstStr += " inode"; // 8 - rstStr += " file_size"; // 9 - rstStr += " read_offset\n"; // 11 - return rstStr; - } - - int ReadData(string filePath, string& fileContent, string& timeStr) { - filePath = GetProcessExecutionDir() + filePath; - FILE* pFile = fopen(filePath.c_str(), "r"); - // cout << "open file" << filePath << endl; - if (pFile == NULL) { - cout << string("query fail, error: ") + strerror(errno) << endl; - return errno; - } - fseek(pFile, 0, SEEK_END); - int fileSize = ftell(pFile); - fseek(pFile, 0, SEEK_SET); - - if (fileSize > 0) { - int rst = 0; - char* readBuf = (char*)malloc(fileSize + 1); - memset(readBuf, 0, fileSize + 1); - fread(readBuf, 1, fileSize, pFile); - - string allStr(readBuf); - - GetTimeStr(allStr, timeStr); - // cout << "time : " << timeStr << endl; - - size_t contentPos = allStr.find("\n:"); - if (contentPos == string::npos || contentPos == allStr.size() - 2) { - cout << (string("file format error, " + filePath).c_str()) << endl; - rst = 1; - goto err; - } - - fileContent = allStr.substr(contentPos + 2); - // cout << "fileContent : " << fileContent.substr(0, 100) 
<< endl; - err: - free(readBuf); - fclose(pFile); - return rst; - } - cout << string("invalid profile, maybe logtail restart") << endl; - - fclose(pFile); - return 2; - } - - static std::string GetFilePath(const std::string& fileType, const std::string& delta) { - const std::string execDir = logtail::GetProcessExecutionDir(); - const std::string pathPrefix = "snapshot" + PATH_SEPARATOR + fileType + "."; - std::string v0FilePath = pathPrefix + "LOG." + delta; - std::string v1FilePath = pathPrefix + delta + ".LOG"; - - fsutil::PathStat v0Stat, v1Stat; - bool foundV0 = fsutil::PathStat::stat(execDir + v0FilePath, v0Stat); - bool foundV1 = fsutil::PathStat::stat(execDir + v1FilePath, v1Stat); - - if (foundV0 && !foundV1) { - return v0FilePath; - } - if (!foundV0 && foundV1) { - return v1FilePath; - } - // Both exist, return latest one. - return v1Stat.GetMtime() >= v0Stat.GetMtime() ? v1FilePath : v0FilePath; - } - - int InitStatus(string delta = "1", bool checkFlag = false) { - int val = atoi(delta.c_str()); - if (val < 1 || val > 60) { - cout << "invalid query interval" << endl; - return 1; - } - string contentStr; - string timeStr; - if (ReadData(GetFilePath("ilogtail_status", delta), contentStr, timeStr) != 0) { - return 1; - } - int32_t queryTime = time(NULL) - val * 60; - if (checkFlag) { - if (mTimeValue - queryTime > 60 || queryTime - mTimeValue > 60) { - cout << "no match time interval, please check logtail status" << endl; - return 1; - } - } - StringToStatus(contentStr); - return 0; - } - - int InitProfile(string delta = "1", bool checkFlag = false) { - int val = atoi(delta.c_str()); - if (val < 1 || val > 60) { - cout << "invalid query interval" << endl; - return 1; - } - string contentStr; - string timeStr; - if (ReadData(GetFilePath("ilogtail_profile", delta), contentStr, timeStr) != 0) { - return 1; - } - int32_t queryTime = time(NULL) - val * 600; - if (checkFlag) { - if (mTimeValue - queryTime > 600 || queryTime - mTimeValue > 600) { - cout << "no match time interval, please check logtail status" << endl; - return 1; - } - } - Json::Value rootValue; - StringToJson(contentStr, rootValue); - if (rootValue.isMember("detail") && rootValue["detail"].isArray()) { - Json::Value& values = rootValue["detail"]; - for (Json::ValueIterator iter = values.begin(); iter != values.end(); ++iter) { - JsonToLogStoreStatistic(*iter, false); - } - } - if (rootValue.isMember("logstore") && rootValue["logstore"].isArray()) { - Json::Value& values = rootValue["logstore"]; - for (Json::ValueIterator iter = values.begin(); iter != values.end(); ++iter) { - JsonToLogStoreStatistic(*iter, true); - } - } - return 0; - } - - int QueryLogstoreRange(string project, string logstore, string beginIndex, string endIndex) { - int bIndex = atoi(beginIndex.c_str()); - int eIndex = atoi(endIndex.c_str()); - if (bIndex < 1 || bIndex > 60 || eIndex < 1 || eIndex > 60 || bIndex > eIndex) { - cout << "invalid query interval" << endl; - return 1; - } - - cout << GetLogStoreProfileHeader(); - int rst = 0; - for (int i = bIndex; i <= eIndex; ++i) { - rst = InitProfile(ToString(i)); - if (rst != 0) { - continue; - } - cout << GetLogStoreProfileOneLine(project, logstore); - mLogstoreProfileMap.clear(); - mFileProfileMap.clear(); - } - return 0; - } - - int QueryLogfileRange(string project, string logstore, string file, string beginIndex, string endIndex) { - int bIndex = atoi(beginIndex.c_str()); - int eIndex = atoi(endIndex.c_str()); - if (bIndex < 1 || bIndex > 60 || eIndex < 1 || eIndex > 60 || bIndex > eIndex) { - cout 
<< "invalid query interval" << endl; - return 1; - } - - cout << GetFileProfileHeader(); - int rst = 0; - for (int i = bIndex; i <= eIndex; ++i) { - rst = InitProfile(ToString(i)); - if (rst != 0) { - continue; - } - cout << GetFileProfileOneLine(project, logstore, file); - mLogstoreProfileMap.clear(); - mFileProfileMap.clear(); - } - return 0; - } - -private: - static LogtailInsight* s_instance; - - void GetTimeStr(const string& allStr, string& timeStr); - -protected: - map mStatusMap; - - Json::Value mProfileJson; - string mTimeString; - int32_t mTimeValue; - // project + logstore - map mLogstoreProfileMap; - // project + logstore + filename - map mFileProfileMap; - -private: -}; - -} // namespace logtail diff --git a/core/monitor/MetricConstants.cpp b/core/monitor/MetricConstants.cpp index 547cd26ed0..8284b8406a 100644 --- a/core/monitor/MetricConstants.cpp +++ b/core/monitor/MetricConstants.cpp @@ -33,7 +33,6 @@ const std::string METRIC_LABEL_INSTANCE_ID = "instance_id"; const std::string METRIC_LABEL_IP = "ip"; const std::string METRIC_LABEL_OS = "os"; const std::string METRIC_LABEL_OS_DETAIL = "os_detail"; -const std::string METRIC_LABEL_PROJECTS = "projects"; const std::string METRIC_LABEL_USER_DEFINED_ID = "user_defined_id"; const std::string METRIC_LABEL_UUID = "uuid"; const std::string METRIC_LABEL_VERSION = "version"; @@ -44,6 +43,7 @@ const std::string METRIC_AGENT_CPU = "agent_cpu_percent"; const std::string METRIC_AGENT_CPU_GO = "agent_go_cpu_percent"; const std::string METRIC_AGENT_MEMORY = "agent_memory_used_mb"; const std::string METRIC_AGENT_MEMORY_GO = "agent_go_memory_used_mb"; +const std::string METRIC_AGENT_GO_ROUTINES_TOTAL = "agent_go_routines_total"; const std::string METRIC_AGENT_OPEN_FD_TOTAL = "agent_open_fd_total"; const std::string METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL = "agent_polling_dir_cache_size_total"; const std::string METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL = "agent_polling_file_cache_size_total"; diff --git a/core/monitor/MetricConstants.h b/core/monitor/MetricConstants.h index 1cd5dc561d..32c6adb601 100644 --- a/core/monitor/MetricConstants.h +++ b/core/monitor/MetricConstants.h @@ -35,7 +35,6 @@ extern const std::string METRIC_LABEL_INSTANCE_ID; extern const std::string METRIC_LABEL_IP; extern const std::string METRIC_LABEL_OS; extern const std::string METRIC_LABEL_OS_DETAIL; -extern const std::string METRIC_LABEL_PROJECTS; extern const std::string METRIC_LABEL_USER_DEFINED_ID; extern const std::string METRIC_LABEL_UUID; extern const std::string METRIC_LABEL_VERSION; @@ -46,6 +45,7 @@ extern const std::string METRIC_AGENT_CPU; extern const std::string METRIC_AGENT_CPU_GO; extern const std::string METRIC_AGENT_MEMORY; extern const std::string METRIC_AGENT_MEMORY_GO; +extern const std::string METRIC_AGENT_GO_ROUTINES_TOTAL; extern const std::string METRIC_AGENT_OPEN_FD_TOTAL; extern const std::string METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL; extern const std::string METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL; diff --git a/core/monitor/MetricExportor.cpp b/core/monitor/MetricExportor.cpp index 153fe621ca..f8a1b10298 100644 --- a/core/monitor/MetricExportor.cpp +++ b/core/monitor/MetricExportor.cpp @@ -34,12 +34,13 @@ DECLARE_FLAG_STRING(metrics_report_method); namespace logtail { -const std::string agentLevelMetricKey = "metric-level"; -const std::string agentLevelMetricValue = "agent"; +const std::string METRIC_EXPORT_TYPE_GO = "direct"; +const std::string METRIC_EXPORT_TYPE_CPP = "cpp_provided"; MetricExportor::MetricExportor() : 
mSendInterval(60), mLastSendTime(time(NULL) - (rand() % (mSendInterval / 10)) * 10) { - // mGlobalCpuGo = LoongCollectorMonitor::GetInstance()->GetDoubleGauge(METRIC_AGENT_CPU_GO); - mGlobalMemGo = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_MEMORY_GO); + // mAgentCpuGo = LoongCollectorMonitor::GetInstance()->GetDoubleGauge(METRIC_AGENT_CPU_GO); + mAgentMemGo = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_MEMORY_GO); + mAgentGoRoutines = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_GO_ROUTINES_TOTAL); } void MetricExportor::PushMetrics(bool forceSend) { @@ -72,37 +73,13 @@ void MetricExportor::PushCppMetrics() { } void MetricExportor::PushGoMetrics() { - std::vector<std::map<std::string, std::string>> goMetircsList; - LogtailPlugin::GetInstance()->GetPipelineMetrics(goMetircsList); + std::vector<std::map<std::string, std::string>> goDirectMetircsList; + LogtailPlugin::GetInstance()->GetGoMetrics(goDirectMetircsList, METRIC_EXPORT_TYPE_GO); + std::vector<std::map<std::string, std::string>> goCppProvidedMetircsList; + LogtailPlugin::GetInstance()->GetGoMetrics(goCppProvidedMetircsList, METRIC_EXPORT_TYPE_CPP); - // filter agent or plugin level metrics - std::vector<std::map<std::string, std::string>> goPluginMetircsList; - for (auto goMetrics : goMetircsList) { - if (goMetrics.find(agentLevelMetricKey) != goMetrics.end()) { - // Go agent-level metrics - if (goMetrics.at(agentLevelMetricKey) == agentLevelMetricValue) { - SendGoAgentLevelMetrics(goMetrics); - continue; - } - } else { - // Go plugin-level metrics - goPluginMetircsList.push_back(std::move(goMetrics)); - } - } - if (goPluginMetircsList.size() == 0) { - return; - } - - // send plugin-level metrics - if ("sls" == STRING_FLAG(metrics_report_method)) { - std::map<std::string, sls_logs::LogGroup*> goPluginMetircsLogGroupMap; - SerializeGoPluginMetricsListToLogGroupMap(goPluginMetircsList, goPluginMetircsLogGroupMap); - SendToSLS(goPluginMetircsLogGroupMap); - } else if ("file" == STRING_FLAG(metrics_report_method)) { - std::string goPluginMetircsContent; - SerializeGoPluginMetricsListToString(goPluginMetircsList, goPluginMetircsContent); - SendToLocalFile(goPluginMetircsContent, "self-metrics-go"); - } + PushGoCppProvidedMetrics(goCppProvidedMetircsList); + PushGoDirectMetrics(goDirectMetircsList); } void MetricExportor::SendToSLS(std::map<std::string, sls_logs::LogGroup*>& logGroupMap) { @@ -171,32 +148,56 @@ void MetricExportor::SendToLocalFile(std::string& metricsContent, const std::str } } -void MetricExportor::SendGoAgentLevelMetrics(std::map<std::string, std::string>& metrics) { - for (auto metric : metrics) { - if (metric.first == agentLevelMetricKey) { - continue; - } - // if (metric.first == METRIC_AGENT_CPU_GO) { - // mGlobalCpuGo->Set(std::stod(metric.second)); - // } - if (metric.first == METRIC_AGENT_MEMORY_GO) { - mGlobalMemGo->Set(std::stoi(metric.second)); +// metrics from Go that are directly outputted +void MetricExportor::PushGoDirectMetrics(std::vector<std::map<std::string, std::string>>& metricsList) { + if (metricsList.size() == 0) { + return; + } + + if ("sls" == STRING_FLAG(metrics_report_method)) { + std::map<std::string, sls_logs::LogGroup*> logGroupMap; + SerializeGoDirectMetricsListToLogGroupMap(metricsList, logGroupMap); + SendToSLS(logGroupMap); + } else if ("file" == STRING_FLAG(metrics_report_method)) { + std::string metricsContent; + SerializeGoDirectMetricsListToString(metricsList, metricsContent); + SendToLocalFile(metricsContent, "self-metrics-go"); + } +} + +// metrics from Go that are provided by cpp +void MetricExportor::PushGoCppProvidedMetrics(std::vector<std::map<std::string, std::string>>& metricsList) { + if (metricsList.size() == 0) { + return; + } + + for (auto metrics : metricsList) { + for (auto metric : metrics) { + // if (metric.first == METRIC_AGENT_CPU_GO) { - + 
// mAgentCpuGo->Set(std::stod(metric.second)); + // } + if (metric.first == METRIC_AGENT_MEMORY_GO) { + mAgentMemGo->Set(std::stoi(metric.second)); + } + if (metric.first == METRIC_AGENT_GO_ROUTINES_TOTAL) { + mAgentGoRoutines->Set(std::stoi(metric.second)); + } + LogtailMonitor::GetInstance()->UpdateMetric(metric.first, metric.second); } - LogtailMonitor::GetInstance()->UpdateMetric(metric.first, metric.second); } } -void MetricExportor::SerializeGoPluginMetricsListToLogGroupMap( - std::vector>& goPluginMetircsList, - std::map& goLogGroupMap) { - for (auto& item : goPluginMetircsList) { +void MetricExportor::SerializeGoDirectMetricsListToLogGroupMap( + std::vector>& metricsList, + std::map& logGroupMap) { + for (auto& metrics : metricsList) { std::string configName = ""; std::string region = METRIC_REGION_DEFAULT; { // get the config_name label - for (const auto& pair : item) { - if (pair.first == "label.config_name") { - configName = pair.second; + for (const auto& metric : metrics) { + if (metric.first == "label.config_name") { + configName = metric.second; break; } } @@ -213,37 +214,37 @@ void MetricExportor::SerializeGoPluginMetricsListToLogGroupMap( } } Log* logPtr = nullptr; - auto LogGroupIter = goLogGroupMap.find(region); - if (LogGroupIter != goLogGroupMap.end()) { + auto LogGroupIter = logGroupMap.find(region); + if (LogGroupIter != logGroupMap.end()) { sls_logs::LogGroup* logGroup = LogGroupIter->second; logPtr = logGroup->add_logs(); } else { sls_logs::LogGroup* logGroup = new sls_logs::LogGroup(); logPtr = logGroup->add_logs(); - goLogGroupMap.insert(std::pair(region, logGroup)); + logGroupMap.insert(std::pair(region, logGroup)); } auto now = GetCurrentLogtailTime(); SetLogTime(logPtr, AppConfig::GetInstance()->EnableLogTimeAutoAdjust() ? now.tv_sec + GetTimeDelta() : now.tv_sec); - for (const auto& pair : item) { + for (const auto& metric : metrics) { Log_Content* contentPtr = logPtr->add_contents(); - contentPtr->set_key(pair.first); - contentPtr->set_value(pair.second); + contentPtr->set_key(metric.first); + contentPtr->set_value(metric.second); } } } -void MetricExportor::SerializeGoPluginMetricsListToString( - std::vector>& goPluginMetircsList, std::string& metricsContent) { +void MetricExportor::SerializeGoDirectMetricsListToString(std::vector>& metricsList, + std::string& metricsContent) { std::ostringstream oss; - for (auto& item : goPluginMetircsList) { + for (auto& metrics : metricsList) { Json::Value metricsRecordValue; auto now = GetCurrentLogtailTime(); metricsRecordValue["time"] = AppConfig::GetInstance()->EnableLogTimeAutoAdjust() ? 
now.tv_sec + GetTimeDelta() : now.tv_sec; - for (const auto& pair : item) { - metricsRecordValue[pair.first] = pair.second; + for (const auto& metric : metrics) { + metricsRecordValue[metric.first] = metric.second; } Json::StreamWriterBuilder writer; writer["indentation"] = ""; diff --git a/core/monitor/MetricExportor.h b/core/monitor/MetricExportor.h index 254d539017..dfb524814a 100644 --- a/core/monitor/MetricExportor.h +++ b/core/monitor/MetricExportor.h @@ -39,19 +39,21 @@ class MetricExportor { // Send Methods void SendToSLS(std::map& logGroupMap); void SendToLocalFile(std::string& metricsContent, const std::string metricsFileNamePrefix); - void SendGoAgentLevelMetrics(std::map& metrics); - // inner methods - void SerializeGoPluginMetricsListToLogGroupMap(std::vector>& goPluginMetircsList, - std::map& goLogGroupMap); - void SerializeGoPluginMetricsListToString(std::vector>& goPluginMetircsList, + // go metrics + void PushGoDirectMetrics(std::vector>& metricsList); + void PushGoCppProvidedMetrics(std::vector>& metricsList); + void SerializeGoDirectMetricsListToLogGroupMap(std::vector>& metricsList, + std::map& logGroupMap); + void SerializeGoDirectMetricsListToString(std::vector>& metricsList, std::string& metricsContent); int32_t mSendInterval; int32_t mLastSendTime; // go process-level metrics - DoubleGaugePtr mGlobalCpuGo; - IntGaugePtr mGlobalMemGo; + DoubleGaugePtr mAgentCpuGo; + IntGaugePtr mAgentMemGo; + IntGaugePtr mAgentGoRoutines; }; } // namespace logtail \ No newline at end of file diff --git a/core/monitor/Monitor.cpp b/core/monitor/Monitor.cpp index 2af97b0cf3..54cf59b0fb 100644 --- a/core/monitor/Monitor.cpp +++ b/core/monitor/Monitor.cpp @@ -33,12 +33,13 @@ #include "common/TimeUtil.h" #include "common/version.h" #include "file_server/event_handler/LogInput.h" -#include "plugin/flusher/sls/FlusherSLS.h" #include "go_pipeline/LogtailPlugin.h" -#include "protobuf/sls/sls_logs.pb.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" +#include "monitor/MetricExportor.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "protobuf/sls/sls_logs.pb.h" #include "runner/FlusherRunner.h" #if defined(__linux__) && !defined(__ANDROID__) #include "ObserverManager.h" @@ -113,9 +114,9 @@ bool LogtailMonitor::Init() { #endif // init metrics - mGlobalCpuGauge = LoongCollectorMonitor::GetInstance()->GetDoubleGauge(METRIC_AGENT_CPU); - mGlobalMemoryGauge = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_MEMORY); - mGlobalUsedSendingConcurrency + mAgentCpuGauge = LoongCollectorMonitor::GetInstance()->GetDoubleGauge(METRIC_AGENT_CPU); + mAgentMemoryGauge = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_MEMORY); + mAgentUsedSendingConcurrency = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_USED_SENDING_CONCURRENCY); // Initialize monitor thread. @@ -177,6 +178,7 @@ void LogtailMonitor::Monitor() { lastCheckHardLimitTime = monitorTime; GetMemStat(); + CalCpuStat(curCpuStat, mCpuStat); if (CheckHardMemLimit()) { LOG_ERROR(sLogger, ("Resource used by program exceeds hard limit", @@ -266,14 +268,14 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) { SetLogTime(logPtr, AppConfig::GetInstance()->EnableLogTimeAutoAdjust() ? now.tv_sec + GetTimeDelta() : now.tv_sec); // CPU usage of Logtail process. 
AddLogContent(logPtr, "cpu", mCpuStat.mCpuUsage); - mGlobalCpuGauge->Set(mCpuStat.mCpuUsage); + mAgentCpuGauge->Set(mCpuStat.mCpuUsage); #if defined(__linux__) // TODO: Remove this if auto scale is available on Windows. // CPU usage of system. AddLogContent(logPtr, "os_cpu", mOsCpuStatForScale.mOsCpuUsage); #endif // Memory usage of Logtail process. AddLogContent(logPtr, "mem", mMemStat.mRss); - mGlobalMemoryGauge->Set(mMemStat.mRss); + mAgentMemoryGauge->Set(mMemStat.mRss); // The version, uuid of Logtail. AddLogContent(logPtr, "version", ILOGTAIL_VERSION); AddLogContent(logPtr, "uuid", Application::GetInstance()->GetUUID()); @@ -318,7 +320,7 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) { } int32_t usedSendingConcurrency = FlusherRunner::GetInstance()->GetSendingBufferCount(); UpdateMetric("used_sending_concurrency", usedSendingConcurrency); - mGlobalUsedSendingConcurrency->Set(usedSendingConcurrency); + mAgentUsedSendingConcurrency->Set(usedSendingConcurrency); AddLogContent(logPtr, "metric_json", MetricToString()); AddLogContent(logPtr, "status", CheckLogtailStatus()); @@ -712,7 +714,7 @@ void LoongCollectorMonitor::Init() { labels.emplace_back(METRIC_LABEL_UUID, Application::GetInstance()->GetUUID()); labels.emplace_back(METRIC_LABEL_VERSION, ILOGTAIL_VERSION); DynamicMetricLabels dynamicLabels; - dynamicLabels.emplace_back(METRIC_LABEL_PROJECTS, []() -> std::string { return FlusherSLS::GetAllProjects(); }); + dynamicLabels.emplace_back(METRIC_LABEL_PROJECT, []() -> std::string { return FlusherSLS::GetAllProjects(); }); #ifdef __ENTERPRISE__ dynamicLabels.emplace_back(METRIC_LABEL_ALIUIDS, []() -> std::string { return EnterpriseConfigProvider::GetInstance()->GetAliuidSet(); }); @@ -727,6 +729,7 @@ void LoongCollectorMonitor::Init() { // mDoubleGauges[METRIC_AGENT_CPU_GO] = mMetricsRecordRef.CreateDoubleGauge(METRIC_AGENT_CPU_GO); mIntGauges[METRIC_AGENT_MEMORY] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_MEMORY); mIntGauges[METRIC_AGENT_MEMORY_GO] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_MEMORY_GO); + mIntGauges[METRIC_AGENT_GO_ROUTINES_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_GO_ROUTINES_TOTAL); mIntGauges[METRIC_AGENT_OPEN_FD_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_OPEN_FD_TOTAL); mIntGauges[METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL); @@ -759,6 +762,7 @@ void LoongCollectorMonitor::Init() { } void LoongCollectorMonitor::Stop() { + MetricExportor::GetInstance()->PushMetrics(true); } CounterPtr LoongCollectorMonitor::GetCounter(std::string key) { diff --git a/core/monitor/Monitor.h b/core/monitor/Monitor.h index cb8744442c..d0cac3314e 100644 --- a/core/monitor/Monitor.h +++ b/core/monitor/Monitor.h @@ -161,12 +161,12 @@ class LogtailMonitor : public MetricStore { CpuStat mRealtimeCpuStat; // Use to calculate CPU limit, updated regularly (30s by default). CpuStat mCpuStat; - DoubleGaugePtr mGlobalCpuGauge; + DoubleGaugePtr mAgentCpuGauge; // Memory usage statistics. MemStat mMemStat; - IntGaugePtr mGlobalMemoryGauge; + IntGaugePtr mAgentMemoryGauge; - IntGaugePtr mGlobalUsedSendingConcurrency; + IntGaugePtr mAgentUsedSendingConcurrency; // Current scale up level, updated by CheckScaledCpuUsageUpLimit. 
float mScaledCpuUsageUpLimit; diff --git a/core/pipeline/Pipeline.h b/core/pipeline/Pipeline.h index 43a989fcd6..5ec31d4ab0 100644 --- a/core/pipeline/Pipeline.h +++ b/core/pipeline/Pipeline.h @@ -110,10 +110,10 @@ class Pipeline { friend class InputPrometheusUnittest; friend class ProcessorTagNativeUnittest; friend class FlusherSLSUnittest; - friend class InputEBPFFileSecurityUnittest; - friend class InputEBPFProcessSecurityUnittest; - friend class InputEBPFNetworkSecurityUnittest; - friend class InputEBPFNetworkObserverUnittest; + friend class InputFileSecurityUnittest; + friend class InputProcessSecurityUnittest; + friend class InputNetworkSecurityUnittest; + friend class InputNetworkObserverUnittest; #endif }; diff --git a/core/pipeline/plugin/PluginRegistry.cpp b/core/pipeline/plugin/PluginRegistry.cpp index b831a2a37d..5beeb2ab0e 100644 --- a/core/pipeline/plugin/PluginRegistry.cpp +++ b/core/pipeline/plugin/PluginRegistry.cpp @@ -31,10 +31,10 @@ #include "plugin/input/InputFile.h" #include "plugin/input/InputPrometheus.h" #if defined(__linux__) && !defined(__ANDROID__) -#include "plugin/input/InputEBPFFileSecurity.h" -#include "plugin/input/InputEBPFNetworkObserver.h" -#include "plugin/input/InputEBPFNetworkSecurity.h" -#include "plugin/input/InputEBPFProcessSecurity.h" +#include "plugin/input/InputFileSecurity.h" +#include "plugin/input/InputNetworkObserver.h" +#include "plugin/input/InputNetworkSecurity.h" +#include "plugin/input/InputProcessSecurity.h" #include "plugin/input/InputObserverNetwork.h" #ifdef __ENTERPRISE__ #include "plugin/input/InputStream.h" @@ -131,10 +131,10 @@ void PluginRegistry::LoadStaticPlugins() { RegisterInputCreator(new StaticInputCreator()); #if defined(__linux__) && !defined(__ANDROID__) RegisterInputCreator(new StaticInputCreator()); - RegisterInputCreator(new StaticInputCreator<InputEBPFFileSecurity>()); - RegisterInputCreator(new StaticInputCreator<InputEBPFNetworkObserver>()); - RegisterInputCreator(new StaticInputCreator<InputEBPFNetworkSecurity>()); - RegisterInputCreator(new StaticInputCreator<InputEBPFProcessSecurity>()); + RegisterInputCreator(new StaticInputCreator<InputFileSecurity>()); + RegisterInputCreator(new StaticInputCreator<InputNetworkObserver>()); + RegisterInputCreator(new StaticInputCreator<InputNetworkSecurity>()); + RegisterInputCreator(new StaticInputCreator<InputProcessSecurity>()); RegisterInputCreator(new StaticInputCreator()); #ifdef __ENTERPRISE__ RegisterInputCreator(new StaticInputCreator()); diff --git a/core/plugin/input/InputEBPFFileSecurity.cpp b/core/plugin/input/InputFileSecurity.cpp similarity index 85% rename from core/plugin/input/InputEBPFFileSecurity.cpp rename to core/plugin/input/InputFileSecurity.cpp index ed44311a4f..576f892f11 100644 --- a/core/plugin/input/InputEBPFFileSecurity.cpp +++ b/core/plugin/input/InputFileSecurity.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
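// The PluginRegistry hunk above swaps the four eBPF input registrations over to the
// renamed classes. A minimal, self-contained sketch of the name-keyed lookup this
// enables; Input, RegisterInput, and gInputCreators are stand-ins invented here, not
// the repository's PluginCreator API:

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Input {
    virtual ~Input() = default;
};

// Renamed plugin, keeping the pattern of a static sName used as the registry key.
struct InputFileSecurity : Input {
    static const std::string sName;
};
const std::string InputFileSecurity::sName = "input_file_security";

// Registry mapping plugin name -> factory, as LoadStaticPlugins populates it.
static std::map<std::string, std::function<std::unique_ptr<Input>()>> gInputCreators;

template <typename T>
void RegisterInput() {
    gInputCreators[T::sName] = [] { return std::unique_ptr<Input>(new T()); };
}

int main() {
    RegisterInput<InputFileSecurity>();
    // The new Type resolves; the retired input_ebpf_* name no longer matches anything.
    std::cout << gInputCreators.count("input_file_security") << "\n";           // 1
    std::cout << gInputCreators.count("input_ebpf_fileprobe_security") << "\n"; // 0
    return 0;
}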
-#include "plugin/input/InputEBPFFileSecurity.h" +#include "plugin/input/InputFileSecurity.h" // #include "ebpf/security/SecurityServer.h" #include "ebpf/include/export.h" @@ -23,9 +23,9 @@ using namespace std; namespace logtail { -const std::string InputEBPFFileSecurity::sName = "input_ebpf_fileprobe_security"; +const std::string InputFileSecurity::sName = "input_file_security"; -bool InputEBPFFileSecurity::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { +bool InputFileSecurity::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { std::string prev_pipeline_name = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::FILE_SECURITY); std::string pipeline_name = mContext->GetConfigName(); if (prev_pipeline_name.size() && prev_pipeline_name != pipeline_name) { @@ -35,11 +35,11 @@ bool InputEBPFFileSecurity::Init(const Json::Value& config, Json::Value& optiona return mSecurityOptions.Init(ebpf::SecurityProbeType::FILE, config, mContext, sName); } -bool InputEBPFFileSecurity::Start() { +bool InputFileSecurity::Start() { return ebpf::eBPFServer::GetInstance()->EnablePlugin(mContext->GetConfigName(), mIndex, nami::PluginType::FILE_SECURITY, mContext, &mSecurityOptions); } -bool InputEBPFFileSecurity::Stop(bool isPipelineRemoving) { +bool InputFileSecurity::Stop(bool isPipelineRemoving) { if (!isPipelineRemoving) { ebpf::eBPFServer::GetInstance()->SuspendPlugin(mContext->GetConfigName(), nami::PluginType::FILE_SECURITY); return true; diff --git a/core/plugin/input/InputEBPFFileSecurity.h b/core/plugin/input/InputFileSecurity.h similarity index 96% rename from core/plugin/input/InputEBPFFileSecurity.h rename to core/plugin/input/InputFileSecurity.h index 75a0cd9f1f..a4e7aa553a 100644 --- a/core/plugin/input/InputEBPFFileSecurity.h +++ b/core/plugin/input/InputFileSecurity.h @@ -24,7 +24,7 @@ namespace logtail { -class InputEBPFFileSecurity : public Input { +class InputFileSecurity : public Input { public: static const std::string sName; diff --git a/core/plugin/input/InputEBPFNetworkObserver.cpp b/core/plugin/input/InputNetworkObserver.cpp similarity index 83% rename from core/plugin/input/InputEBPFNetworkObserver.cpp rename to core/plugin/input/InputNetworkObserver.cpp index a72b43a2ca..8c1c836b5d 100644 --- a/core/plugin/input/InputEBPFNetworkObserver.cpp +++ b/core/plugin/input/InputNetworkObserver.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "plugin/input/InputEBPFNetworkObserver.h" +#include "plugin/input/InputNetworkObserver.h" #include "ebpf/include/export.h" #include "ebpf/eBPFServer.h" @@ -23,9 +23,9 @@ using namespace std; namespace logtail { -const std::string InputEBPFNetworkObserver::sName = "input_ebpf_sockettraceprobe_observer"; +const std::string InputNetworkObserver::sName = "input_network_observer"; -bool InputEBPFNetworkObserver::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { +bool InputNetworkObserver::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { std::string prev_pipeline_name = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_OBSERVE); std::string pipeline_name = mContext->GetConfigName(); if (prev_pipeline_name.size() && prev_pipeline_name != pipeline_name) { @@ -35,11 +35,11 @@ bool InputEBPFNetworkObserver::Init(const Json::Value& config, Json::Value& opti return ebpf::InitObserverNetworkOption(config, mNetworkOption, mContext, sName); } -bool InputEBPFNetworkObserver::Start() { +bool InputNetworkObserver::Start() { return ebpf::eBPFServer::GetInstance()->EnablePlugin(mContext->GetConfigName(), mIndex, nami::PluginType::NETWORK_OBSERVE, mContext, &mNetworkOption); } -bool InputEBPFNetworkObserver::Stop(bool isPipelineRemoving) { +bool InputNetworkObserver::Stop(bool isPipelineRemoving) { if (!isPipelineRemoving) { ebpf::eBPFServer::GetInstance()->SuspendPlugin(mContext->GetConfigName(), nami::PluginType::NETWORK_OBSERVE); return true; diff --git a/core/plugin/input/InputEBPFNetworkObserver.h b/core/plugin/input/InputNetworkObserver.h similarity index 96% rename from core/plugin/input/InputEBPFNetworkObserver.h rename to core/plugin/input/InputNetworkObserver.h index 10ebedbeea..78388c57bd 100644 --- a/core/plugin/input/InputEBPFNetworkObserver.h +++ b/core/plugin/input/InputNetworkObserver.h @@ -24,7 +24,7 @@ namespace logtail { -class InputEBPFNetworkObserver : public Input { +class InputNetworkObserver : public Input { public: static const std::string sName; diff --git a/core/plugin/input/InputEBPFNetworkSecurity.cpp b/core/plugin/input/InputNetworkSecurity.cpp similarity index 84% rename from core/plugin/input/InputEBPFNetworkSecurity.cpp rename to core/plugin/input/InputNetworkSecurity.cpp index d9ff9ec188..922d62e679 100644 --- a/core/plugin/input/InputEBPFNetworkSecurity.cpp +++ b/core/plugin/input/InputNetworkSecurity.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "plugin/input/InputEBPFNetworkSecurity.h" +#include "plugin/input/InputNetworkSecurity.h" #include "ebpf/include/export.h" #include "ebpf/eBPFServer.h" @@ -22,12 +22,12 @@ using namespace std; namespace logtail { -const std::string InputEBPFNetworkSecurity::sName = "input_ebpf_sockettraceprobe_security"; +const std::string InputNetworkSecurity::sName = "input_network_security"; // enable: init -> start // update: init -> stop(false) -> start // stop: stop(true) -bool InputEBPFNetworkSecurity::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { +bool InputNetworkSecurity::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { std::string prev_pipeline_name = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); std::string pipeline_name = mContext->GetConfigName(); if (prev_pipeline_name.size() && prev_pipeline_name != pipeline_name) { @@ -38,11 +38,11 @@ bool InputEBPFNetworkSecurity::Init(const Json::Value& config, Json::Value& opti return mSecurityOptions.Init(ebpf::SecurityProbeType::NETWORK, config, mContext, sName); } -bool InputEBPFNetworkSecurity::Start() { +bool InputNetworkSecurity::Start() { return ebpf::eBPFServer::GetInstance()->EnablePlugin(mContext->GetConfigName(), mIndex, nami::PluginType::NETWORK_SECURITY, mContext, &mSecurityOptions); } -bool InputEBPFNetworkSecurity::Stop(bool isPipelineRemoving) { +bool InputNetworkSecurity::Stop(bool isPipelineRemoving) { if (!isPipelineRemoving) { ebpf::eBPFServer::GetInstance()->SuspendPlugin(mContext->GetConfigName(), nami::PluginType::NETWORK_SECURITY); return true; diff --git a/core/plugin/input/InputEBPFNetworkSecurity.h b/core/plugin/input/InputNetworkSecurity.h similarity index 95% rename from core/plugin/input/InputEBPFNetworkSecurity.h rename to core/plugin/input/InputNetworkSecurity.h index 125fcbff33..4e7c441e7f 100644 --- a/core/plugin/input/InputEBPFNetworkSecurity.h +++ b/core/plugin/input/InputNetworkSecurity.h @@ -23,7 +23,7 @@ namespace logtail { -class InputEBPFNetworkSecurity : public Input { +class InputNetworkSecurity : public Input { public: static const std::string sName; diff --git a/core/plugin/input/InputEBPFProcessSecurity.cpp b/core/plugin/input/InputProcessSecurity.cpp similarity index 83% rename from core/plugin/input/InputEBPFProcessSecurity.cpp rename to core/plugin/input/InputProcessSecurity.cpp index 04b9696929..38e41d5c59 100644 --- a/core/plugin/input/InputEBPFProcessSecurity.cpp +++ b/core/plugin/input/InputProcessSecurity.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "plugin/input/InputEBPFProcessSecurity.h" +#include "plugin/input/InputProcessSecurity.h" #include "ebpf/include/export.h" #include "ebpf/eBPFServer.h" @@ -21,9 +21,9 @@ using namespace std; namespace logtail { -const std::string InputEBPFProcessSecurity::sName = "input_ebpf_processprobe_security"; +const std::string InputProcessSecurity::sName = "input_process_security"; -bool InputEBPFProcessSecurity::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { +bool InputProcessSecurity::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { std::string prev_pipeline_name = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); std::string pipeline_name = mContext->GetConfigName(); if (prev_pipeline_name.size() && prev_pipeline_name != pipeline_name) { @@ -33,11 +33,11 @@ bool InputEBPFProcessSecurity::Init(const Json::Value& config, Json::Value& opti return mSecurityOptions.Init(ebpf::SecurityProbeType::PROCESS, config, mContext, sName); } -bool InputEBPFProcessSecurity::Start() { +bool InputProcessSecurity::Start() { return ebpf::eBPFServer::GetInstance()->EnablePlugin(mContext->GetConfigName(), mIndex, nami::PluginType::PROCESS_SECURITY,mContext, &mSecurityOptions); } -bool InputEBPFProcessSecurity::Stop(bool isPipelineRemoving) { +bool InputProcessSecurity::Stop(bool isPipelineRemoving) { if (!isPipelineRemoving) { ebpf::eBPFServer::GetInstance()->SuspendPlugin(mContext->GetConfigName(), nami::PluginType::PROCESS_SECURITY); return true; diff --git a/core/plugin/input/InputEBPFProcessSecurity.h b/core/plugin/input/InputProcessSecurity.h similarity index 95% rename from core/plugin/input/InputEBPFProcessSecurity.h rename to core/plugin/input/InputProcessSecurity.h index c89210c986..afcffcb439 100644 --- a/core/plugin/input/InputEBPFProcessSecurity.h +++ b/core/plugin/input/InputProcessSecurity.h @@ -24,7 +24,7 @@ namespace logtail { -class InputEBPFProcessSecurity : public Input { +class InputProcessSecurity : public Input { public: static const std::string sName; diff --git a/core/runner/LogProcess.cpp b/core/runner/LogProcess.cpp index dfd7b5e87e..c68196f8b5 100644 --- a/core/runner/LogProcess.cpp +++ b/core/runner/LogProcess.cpp @@ -169,10 +169,10 @@ void* LogProcess::ProcessLoop(int32_t threadNo) { // update process queue status uint32_t InvalidProcessQueueTotal = ProcessQueueManager::GetInstance()->GetInvalidCnt(); sMonitor->UpdateMetric("process_queue_full", InvalidProcessQueueTotal); - mGlobalProcessQueueFullTotal->Set(InvalidProcessQueueTotal); + mAgentProcessQueueFullTotal->Set(InvalidProcessQueueTotal); uint32_t ProcessQueueTotal = ProcessQueueManager::GetInstance()->GetCnt(); sMonitor->UpdateMetric("process_queue_total", ProcessQueueTotal); - mGlobalProcessQueueTotal->Set(ProcessQueueTotal); + mAgentProcessQueueTotal->Set(ProcessQueueTotal); if (ExactlyOnceQueueManager::GetInstance()->GetProcessQueueCnt() > 0) { sMonitor->UpdateMetric("eo_process_queue_full", ExactlyOnceQueueManager::GetInstance()->GetInvalidProcessQueueCnt()); diff --git a/core/runner/LogProcess.h b/core/runner/LogProcess.h index c5ddf02c67..c0bd77e9ad 100644 --- a/core/runner/LogProcess.h +++ b/core/runner/LogProcess.h @@ -60,8 +60,8 @@ class LogProcess : public LogRunnable { std::atomic_bool* mThreadFlags; ReadWriteLock mAccessProcessThreadRWL; - IntGaugePtr mGlobalProcessQueueFullTotal; - IntGaugePtr mGlobalProcessQueueTotal; + IntGaugePtr mAgentProcessQueueFullTotal; + IntGaugePtr mAgentProcessQueueTotal; }; } // namespace logtail \ No 
newline at end of file diff --git a/core/unittest/common/LogFileOperatorUnittest.cpp b/core/unittest/common/LogFileOperatorUnittest.cpp index 2e5f1239cd..b5b27ce7a1 100644 --- a/core/unittest/common/LogFileOperatorUnittest.cpp +++ b/core/unittest/common/LogFileOperatorUnittest.cpp @@ -124,7 +124,6 @@ void LogFileOperatorUnittest::MockTruncate(const char* path, off_t keep_length, void LogFileOperatorUnittest::TestCons() { LogFileOperator logFileOp; - APSARA_TEST_EQUAL(logFileOp.mFuseMode, false); APSARA_TEST_EQUAL(logFileOp.mFd, -1); APSARA_TEST_EQUAL(logFileOp.IsOpen(), false); } @@ -143,12 +142,10 @@ void LogFileOperatorUnittest::TestOpen() { fd = logFileOp.Open(""); APSARA_TEST_TRUE(fd < 0); APSARA_TEST_EQUAL(logFileOp.IsOpen(), false); - APSARA_TEST_EQUAL(logFileOp.mFuseMode, false); fd = logFileOp.Open(file.c_str()); APSARA_TEST_TRUE(fd < 0); APSARA_TEST_EQUAL(logFileOp.IsOpen(), false); - APSARA_TEST_EQUAL(logFileOp.mFuseMode, false); { std::ofstream(file, std::ios_base::binary) << ""; } @@ -156,7 +153,6 @@ void LogFileOperatorUnittest::TestOpen() { fd = logFileOp.Open(file.c_str()); APSARA_TEST_TRUE(fd >= 0); APSARA_TEST_EQUAL(logFileOp.IsOpen(), true); - APSARA_TEST_EQUAL(logFileOp.mFuseMode, false); } #if defined(ENABLE_FUSE) diff --git a/core/unittest/ebpf/eBPFServerUnittest.cpp b/core/unittest/ebpf/eBPFServerUnittest.cpp index 03fdcd15d2..8a45a73242 100644 --- a/core/unittest/ebpf/eBPFServerUnittest.cpp +++ b/core/unittest/ebpf/eBPFServerUnittest.cpp @@ -513,7 +513,7 @@ void eBPFServerUnittest::TestInit() { void eBPFServerUnittest::TestEnableNetworkPlugin() { std::string configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_observer", + "Type": "input_network_observer", "ProbeConfig": { "EnableLog": true, @@ -602,17 +602,7 @@ void eBPFServerUnittest::TestEnableNetworkPlugin() { void eBPFServerUnittest::TestEnableProcessPlugin() { std::string configStr = R"( { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - ] + "Type": "input_process_security", } )"; @@ -621,7 +611,7 @@ void eBPFServerUnittest::TestEnableProcessPlugin() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); std::cout << "1" << std::endl; SecurityOptions security_options; - security_options.Init(SecurityProbeType::PROCESS, configJson, &ctx, "input_ebpf_processprobe_security"); + security_options.Init(SecurityProbeType::PROCESS, configJson, &ctx, "input_process_security"); bool res = ebpf::eBPFServer::GetInstance()->EnablePlugin( "test", 0, nami::PluginType::PROCESS_SECURITY, @@ -635,7 +625,7 @@ void eBPFServerUnittest::TestEnableProcessPlugin() { EXPECT_TRUE(process_conf.process_security_cb_ != nullptr); LOG_WARNING(sLogger, ("process_conf.options_ size", process_conf.options_.size())); EXPECT_EQ(process_conf.options_.size(), 1); - EXPECT_EQ(process_conf.options_[0].call_names_.size(), 4); + EXPECT_EQ(process_conf.options_[0].call_names_.size(), 5); // do suspend ebpf::eBPFServer::GetInstance()->SuspendPlugin("test", nami::PluginType::PROCESS_SECURITY); @@ -662,25 +652,16 @@ void eBPFServerUnittest::TestEnableProcessPlugin() { void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { std::string configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80], - "SourceAddrBlackList": 
["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - }, - { - "CallNameFilter": ["tcp_sendmsg"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80] - } + "Type": "input_network_security", + "ProbeConfig": + { + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [80], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] } - ] + } } )"; @@ -688,9 +669,9 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { Json::Value configJson; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); SecurityOptions security_options; - security_options.Init(SecurityProbeType::NETWORK, configJson, &ctx, "input_ebpf_sockettraceprobe_security"); + security_options.Init(SecurityProbeType::NETWORK, configJson, &ctx, "input_network_security"); bool res = ebpf::eBPFServer::GetInstance()->EnablePlugin( - "input_ebpf_sockettraceprobe_security", 5, + "input_network_security", 5, nami::PluginType::NETWORK_SECURITY, &ctx, &security_options); @@ -700,8 +681,8 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_ENABLE_PROBE); auto inner_conf = std::get(conf->config_); EXPECT_TRUE(inner_conf.network_security_cb_ != nullptr); - EXPECT_EQ(inner_conf.options_.size(), 2); - EXPECT_EQ(inner_conf.options_[0].call_names_.size(), 2); + EXPECT_EQ(inner_conf.options_.size(), 1); + EXPECT_EQ(inner_conf.options_[0].call_names_.size(), 3); auto filter = std::get(inner_conf.options_[0].filter_); EXPECT_EQ(filter.mDestAddrList.size(), 2); EXPECT_EQ(filter.mDestPortList.size(), 1); @@ -715,7 +696,7 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { EXPECT_TRUE(ebpf::eBPFServer::GetInstance()->mSourceManager->mRunning[int(nami::PluginType::NETWORK_SECURITY)]); res = ebpf::eBPFServer::GetInstance()->EnablePlugin( - "input_ebpf_sockettraceprobe_security", 0, + "input_network_security", 0, nami::PluginType::NETWORK_SECURITY, &ctx, &security_options); @@ -736,17 +717,15 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { void eBPFServerUnittest::TestEnableFileSecurePlugin() { std::string configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [ - "/etc/passwd", - "/etc/shadow", - "/bin" - ] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [ + "/etc/passwd", + "/etc/shadow", + "/bin" + ] + } } )"; @@ -755,9 +734,9 @@ void eBPFServerUnittest::TestEnableFileSecurePlugin() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); std::cout << "1" << std::endl; SecurityOptions security_options; - security_options.Init(SecurityProbeType::FILE, configJson, &ctx, "input_ebpf_fileprobe_security"); + security_options.Init(SecurityProbeType::FILE, configJson, &ctx, "input_file_security"); bool res = ebpf::eBPFServer::GetInstance()->EnablePlugin( - "input_ebpf_fileprobe_security", 0, + "input_file_security", 0, nami::PluginType::FILE_SECURITY, &ctx, &security_options); @@ -769,7 +748,7 @@ void eBPFServerUnittest::TestEnableFileSecurePlugin() { auto inner_conf = std::get(conf->config_); EXPECT_TRUE(inner_conf.file_security_cb_ != nullptr); EXPECT_EQ(inner_conf.options_.size(), 1); - EXPECT_EQ(inner_conf.options_[0].call_names_.size(), 1); + EXPECT_EQ(inner_conf.options_[0].call_names_.size(), 3); auto filter = std::get(inner_conf.options_[0].filter_); EXPECT_EQ(filter.mFilePathList.size(), 3); 
EXPECT_EQ(filter.mFilePathList[0], "/etc/passwd"); @@ -782,7 +761,7 @@ void eBPFServerUnittest::TestEnableFileSecurePlugin() { EXPECT_TRUE(ebpf::eBPFServer::GetInstance()->mSourceManager->mRunning[int(nami::PluginType::FILE_SECURITY)]); res = ebpf::eBPFServer::GetInstance()->EnablePlugin( - "input_ebpf_fileprobe_security", 0, + "input_file_security", 0, nami::PluginType::FILE_SECURITY, &ctx, &security_options); diff --git a/core/unittest/input/CMakeLists.txt b/core/unittest/input/CMakeLists.txt index b57fac2550..49129244cf 100644 --- a/core/unittest/input/CMakeLists.txt +++ b/core/unittest/input/CMakeLists.txt @@ -24,16 +24,16 @@ target_link_libraries(input_container_stdio_unittest ${UT_BASE_TARGET}) add_executable(input_prometheus_unittest InputPrometheusUnittest.cpp) target_link_libraries(input_prometheus_unittest ${UT_BASE_TARGET}) -add_executable(input_ebpf_file_security_unittest InputEBPFFileSecurityUnittest.cpp) +add_executable(input_ebpf_file_security_unittest InputFileSecurityUnittest.cpp) target_link_libraries(input_ebpf_file_security_unittest unittest_base) -add_executable(input_ebpf_process_security_unittest InputEBPFProcessSecurityUnittest.cpp) +add_executable(input_ebpf_process_security_unittest InputProcessSecurityUnittest.cpp) target_link_libraries(input_ebpf_process_security_unittest unittest_base) -add_executable(input_ebpf_network_security_unittest InputEBPFNetworkSecurityUnittest.cpp) +add_executable(input_ebpf_network_security_unittest InputNetworkSecurityUnittest.cpp) target_link_libraries(input_ebpf_network_security_unittest unittest_base) -add_executable(input_ebpf_network_observer_unittest InputEBPFNetworkObserverUnittest.cpp) +add_executable(input_ebpf_network_observer_unittest InputNetworkObserverUnittest.cpp) target_link_libraries(input_ebpf_network_observer_unittest unittest_base) include(GoogleTest) diff --git a/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp b/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp deleted file mode 100644 index 48e3eeace7..0000000000 --- a/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2023 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
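(For orientation before the deleted suite below: the updated assertions above expect 5 process call names and 3 network/file call names because the flattened schema drops CallNameFilter and always applies per-probe defaults. A minimal sketch of that fallback follows; the helper name and the exact default sets beyond those visible in this patch are assumptions inferred from the test counts, not taken from the patch itself:)

    // Sketch only: per-probe default call names implied by the updated assertions.
    #include <string>
    #include <vector>
    enum class ProbeKind { PROCESS, NETWORK, FILE };
    std::vector<std::string> DefaultCallNames(ProbeKind kind) { // hypothetical helper
        switch (kind) {
            case ProbeKind::PROCESS: // 5 entries, matching call_names_.size() == 5; sys_enter_clone is assumed
                return {"sys_enter_execve", "sys_enter_clone", "disassociate_ctty", "acct_process", "wake_up_new_task"};
            case ProbeKind::NETWORK: // 3 entries, matching call_names_.size() == 3
                return {"tcp_connect", "tcp_close", "tcp_sendmsg"};
            case ProbeKind::FILE: // 3 entries; names beyond security_file_permission are assumed
                return {"security_file_permission", "security_mmap_file", "security_path_truncate"};
        }
        return {};
    }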
- -#include - -#include "app_config/AppConfig.h" -#include "common/JsonUtil.h" -#include "ebpf/config.h" -#include "plugin/input/InputEBPFNetworkSecurity.h" -#include "pipeline/Pipeline.h" -#include "pipeline/PipelineContext.h" -#include "unittest/Unittest.h" -#include "ebpf/eBPFServer.h" - -using namespace std; - -namespace logtail { - -class InputEBPFNetworkSecurityUnittest : public testing::Test { -public: - void OnSuccessfulInit(); - void OnFailedInit(); - void OnSuccessfulStart(); - void OnSuccessfulStop(); - // void OnPipelineUpdate(); - -protected: - void SetUp() override { - p.mName = "test_config"; - ctx.SetConfigName("test_config"); - ctx.SetPipeline(p); - ebpf::eBPFServer::GetInstance()->Init(); - } - -private: - Pipeline p; - PipelineContext ctx; -}; - -void InputEBPFNetworkSecurityUnittest::OnSuccessfulInit() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - // valid optional param - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - }, - { - "CallNameFilter": ["tcp_sendmsg"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); - nami::SecurityNetworkFilter thisFilter1 - = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("tcp_close", input->mSecurityOptions.mOptionList[0].call_names_[1]); - APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter1.mDestAddrList[0]); - APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter1.mDestAddrList[1]); - APSARA_TEST_EQUAL(1, thisFilter1.mDestPortList.size()); - APSARA_TEST_EQUAL("127.0.0.1/8", thisFilter1.mSourceAddrBlackList[0]); - APSARA_TEST_EQUAL(9300, thisFilter1.mSourcePortBlackList[0]); - APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); - nami::SecurityNetworkFilter thisFilter2 - = std::get(input->mSecurityOptions.mOptionList[1].filter_); - APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); - APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter2.mDestAddrList[0]); - APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter2.mDestAddrList[1]); - APSARA_TEST_EQUAL(80, thisFilter2.mDestPortList[0]); -} - -void InputEBPFNetworkSecurityUnittest::OnFailedInit() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - // invalid optional param - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": ["80"], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - 
APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); - nami::SecurityNetworkFilter thisFilter1 - = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter1.mDestAddrList[0]); - APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter1.mDestAddrList[1]); - APSARA_TEST_EQUAL(0, thisFilter1.mDestPortList.size()); - APSARA_TEST_EQUAL("127.0.0.1/8", thisFilter1.mSourceAddrBlackList[0]); - APSARA_TEST_EQUAL(9300, thisFilter1.mSourcePortBlackList[0]); - - // duplicate callname - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - }, - { - "CallNameFilter": ["tcp_connect"], - "AddrFilter": { - "DestAddrList": ["10.0.0.1/8"], - "DestPortList": [70] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); - APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); - APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("tcp_close", input->mSecurityOptions.mOptionList[0].call_names_[1]); - nami::SecurityNetworkFilter thisFilter2 - = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter2.mDestAddrList[0]); - APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter2.mDestAddrList[1]); - APSARA_TEST_EQUAL(80, thisFilter2.mDestPortList[0]); - - // one duplicate callname of two - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - }, - { - "CallNameFilter": ["tcp_connect", "tcp_sendmsg"], - "AddrFilter": { - "DestAddrList": ["10.0.0.1/8"], - "DestPortList": [70] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); - APSARA_TEST_EQUAL(2, input->mSecurityOptions.mOptionList.size()); - APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("tcp_close", input->mSecurityOptions.mOptionList[0].call_names_[1]); - nami::SecurityNetworkFilter thisFilter3 - = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter3.mDestAddrList[0]); - APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter3.mDestAddrList[1]); - APSARA_TEST_EQUAL(80, thisFilter3.mDestPortList[0]); - APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); - nami::SecurityNetworkFilter thisFilter4 - = std::get(input->mSecurityOptions.mOptionList[1].filter_); - APSARA_TEST_EQUAL("10.0.0.1/8", thisFilter4.mDestAddrList[0]); - APSARA_TEST_EQUAL(70, thisFilter4.mDestPortList[0]); - - 
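(The two sub-cases ending here pinned down how the old parser de-duplicated call names across ProbeConfig entries: a fully duplicated entry was dropped, while a partially duplicated one kept its remaining names. A self-contained sketch of that behavior, using an assumed seen-set helper purely for illustration:)

    // Sketch only: cross-entry call-name de-duplication that the assertions describe.
    #include <string>
    #include <unordered_set>
    #include <vector>
    std::vector<std::vector<std::string>> DedupCallNames(const std::vector<std::vector<std::string>>& entries) {
        std::unordered_set<std::string> seen;
        std::vector<std::vector<std::string>> result;
        for (const auto& entry : entries) {
            std::vector<std::string> kept;
            for (const auto& name : entry) {
                if (seen.insert(name).second) { // keep only the first occurrence of each call name
                    kept.push_back(name);
                }
            }
            if (!kept.empty()) { // an entry reduced to nothing is dropped entirely
                result.push_back(kept);
            }
        }
        return result; // [[tcp_connect, tcp_close], [tcp_connect]] -> one option; [..., [tcp_connect, tcp_sendmsg]] -> tcp_sendmsg survives
    }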
// error param level - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": ["80"], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - nami::SecurityNetworkFilter thisFilter5 - = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL(thisFilter5.mDestAddrList.size(), 0); - APSARA_TEST_EQUAL(thisFilter5.mDestPortList.size(), 0); - APSARA_TEST_EQUAL(thisFilter5.mSourceAddrBlackList.size(), 0); - APSARA_TEST_EQUAL(thisFilter5.mSourcePortBlackList.size(), 0); - - // valid and invalid optional param - // if the optional param in a list is invalid, the valid param after it will be read - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [40, "80", 160], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - nami::SecurityNetworkFilter thisFilter6 - = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL(2, thisFilter6.mDestAddrList.size()); - - // invalid callname - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["udp"], - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); - APSARA_TEST_EQUAL(3, input->mSecurityOptions.mOptionList[0].call_names_.size()); -} - -void InputEBPFNetworkSecurityUnittest::OnSuccessfulStart() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - }, - { - "CallNameFilter": ["tcp_sendmsg"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(input->Start()); - string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); - string pipelineName = input->GetContext().GetConfigName(); - APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); -} - -void InputEBPFNetworkSecurityUnittest::OnSuccessfulStop() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string 
configStr, errorMsg; - - configStr = R"( - { - "Type": "input_ebpf_sockettraceprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["tcp_connect", "tcp_close"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80], - "SourceAddrBlackList": ["127.0.0.1/8"], - "SourcePortBlackList": [9300] - } - }, - { - "CallNameFilter": ["tcp_sendmsg"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80] - } - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(input->Start()); - string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); - string pipelineName = input->GetContext().GetConfigName(); - APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); - APSARA_TEST_TRUE(input->Stop(false)); - serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); - APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); - APSARA_TEST_TRUE(input->Stop(true)); - serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); - APSARA_TEST_TRUE(serverPipelineName.empty()); -} - -UNIT_TEST_CASE(InputEBPFNetworkSecurityUnittest, OnSuccessfulInit) -UNIT_TEST_CASE(InputEBPFNetworkSecurityUnittest, OnFailedInit) -UNIT_TEST_CASE(InputEBPFNetworkSecurityUnittest, OnSuccessfulStart) -UNIT_TEST_CASE(InputEBPFNetworkSecurityUnittest, OnSuccessfulStop) -// UNIT_TEST_CASE(InputEBPFNetworkSecurityUnittest, OnPipelineUpdate) - -} // namespace logtail - -UNIT_TEST_MAIN diff --git a/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp b/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp deleted file mode 100644 index 7fa83255d4..0000000000 --- a/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2023 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
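(The Stop() tests in the suite deleted above and the one below assert the same suspend-versus-remove contract. Restated as a usage sketch against the Input API these tests exercise; the wrapper names are illustrative only:)

    // Sketch only: the Stop(bool isPipelineRemoving) contract the unittests assert.
    // Stop(false) suspends the probe: eBPFServer still reports the owning pipeline
    // via CheckLoadedPipelineName(). Stop(true) tears it down: the name comes back empty.
    bool SuspendForUpdate(logtail::Input& input) { return input.Stop(/*isPipelineRemoving=*/false); }
    bool RemoveWithPipeline(logtail::Input& input) { return input.Stop(/*isPipelineRemoving=*/true); }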
- -#include - -#include "app_config/AppConfig.h" -#include "common/JsonUtil.h" -#include "ebpf/config.h" -#include "plugin/input/InputEBPFProcessSecurity.h" -#include "pipeline/Pipeline.h" -#include "pipeline/PipelineContext.h" -#include "unittest/Unittest.h" -#include "ebpf/eBPFServer.h" - -using namespace std; - -namespace logtail { - -class InputEBPFProcessSecurityUnittest : public testing::Test { -public: - void OnSuccessfulInit(); - void OnFailedInit(); - void OnSuccessfulStart(); - void OnSuccessfulStop(); - // void OnPipelineUpdate(); - -protected: - void SetUp() override { - p.mName = "test_config"; - ctx.SetConfigName("test_config"); - ctx.SetPipeline(p); - ebpf::eBPFServer::GetInstance()->Init(); - } - -private: - Pipeline p; - PipelineContext ctx; -}; - -void InputEBPFProcessSecurityUnittest::OnSuccessfulInit() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - // valid param - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - // four callnames - APSARA_TEST_EQUAL("sys_enter_execve", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("disassociate_ctty", input->mSecurityOptions.mOptionList[0].call_names_[1]); - APSARA_TEST_EQUAL("acct_process", input->mSecurityOptions.mOptionList[0].call_names_[2]); - APSARA_TEST_EQUAL("wake_up_new_task", input->mSecurityOptions.mOptionList[0].call_names_[3]); - // no general filter, default is monostate - APSARA_TEST_EQUAL(std::holds_alternative(input->mSecurityOptions.mOptionList[0].filter_), true); -} - -void InputEBPFProcessSecurityUnittest::OnFailedInit() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - // no probeconfig - configStr = R"( - { - "Type": "input_ebpf_processprobe_security" - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // probeconfig typo error - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfiggg": [ - { - "CallNameFilter": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // probeconfig type error - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": { - "CallNameFilter": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - } - )"; - 
APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // no callname - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // callname typo error - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameeee": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // callname type error - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallName": "sys_enter_execve" - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // callname element type error at the first element - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallName": [ - 1, - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // callname element type error at the last element - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallName": [ - "disassociate_ctty", - "acct_process", - "wake_up_new_task", - 1 - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // null callname - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - 
"CallNameFilter": [ - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // invalid callname - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": [ - "sys_enter_execve_error" - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - - // invalid callname of two - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": [ - "sys_enter_execve_error", - "disassociate_ctty", - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); - APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_[0], "disassociate_ctty"); -} - -void InputEBPFProcessSecurityUnittest::OnSuccessfulStart() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(input->Start()); - string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); - string pipelineName = input->GetContext().GetConfigName(); - APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); -} - -void InputEBPFProcessSecurityUnittest::OnSuccessfulStop() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - - configStr = R"( - { - "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": [ - "sys_enter_execve", - "disassociate_ctty", - "acct_process", - "wake_up_new_task" - ] - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFProcessSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(input->Start()); - string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); - string pipelineName = input->GetContext().GetConfigName(); - APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); - APSARA_TEST_TRUE(input->Stop(false)); - serverPipelineName = 
ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); - APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); - APSARA_TEST_TRUE(input->Stop(true)); - serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); - APSARA_TEST_TRUE(serverPipelineName.empty()); -} - -UNIT_TEST_CASE(InputEBPFProcessSecurityUnittest, OnSuccessfulInit) -UNIT_TEST_CASE(InputEBPFProcessSecurityUnittest, OnFailedInit) -UNIT_TEST_CASE(InputEBPFProcessSecurityUnittest, OnSuccessfulStart) -UNIT_TEST_CASE(InputEBPFProcessSecurityUnittest, OnSuccessfulStop) -// UNIT_TEST_CASE(InputEBPFProcessSecurityUnittest, OnPipelineUpdate) - -} // namespace logtail - -UNIT_TEST_MAIN diff --git a/core/unittest/input/InputEBPFFileSecurityUnittest.cpp b/core/unittest/input/InputFileSecurityUnittest.cpp similarity index 52% rename from core/unittest/input/InputEBPFFileSecurityUnittest.cpp rename to core/unittest/input/InputFileSecurityUnittest.cpp index bdee6ecfa5..61ee2276db 100644 --- a/core/unittest/input/InputEBPFFileSecurityUnittest.cpp +++ b/core/unittest/input/InputFileSecurityUnittest.cpp @@ -19,7 +19,7 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "ebpf/config.h" -#include "plugin/input/InputEBPFFileSecurity.h" +#include "plugin/input/InputFileSecurity.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include "unittest/Unittest.h" @@ -29,8 +29,10 @@ using namespace std; namespace logtail { -class InputEBPFFileSecurityUnittest : public testing::Test { +class InputFileSecurityUnittest : public testing::Test { public: + void TestName(); + void TestSupportAck(); void OnSuccessfulInit(); void OnFailedInit(); void OnSuccessfulStart(); @@ -50,176 +52,151 @@ class InputEBPFFileSecurityUnittest : public testing::Test { PipelineContext ctx; }; -void InputEBPFFileSecurityUnittest::OnSuccessfulInit() { - unique_ptr<InputEBPFFileSecurity> input; +void InputFileSecurityUnittest::TestName() { + InputFileSecurity input; + std::string name = input.Name(); + APSARA_TEST_EQUAL(name, "input_file_security"); +} + +void InputFileSecurityUnittest::TestSupportAck() { + InputFileSecurity input; + bool supportAck = input.SupportAck(); + APSARA_TEST_FALSE(supportAck); +} + +void InputFileSecurityUnittest::OnSuccessfulInit() { + unique_ptr<InputFileSecurity> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; // only mandatory param configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [ - "/etc", - "/bin" - ] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [ + "/etc", + "/bin" + ] + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + APSARA_TEST_EQUAL(input->sName, "input_file_security"); nami::SecurityFileFilter thisFilter1 = std::get<nami::SecurityFileFilter>(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityProbeType::FILE, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); APSARA_TEST_EQUAL("/etc", thisFilter1.mFilePathList[0]); APSARA_TEST_EQUAL("/bin", thisFilter1.mFilePathList[1]); // valid optional
param configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [ - "/etc/passwd", - "/etc/shadow", - "/bin" - ] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [ + "/etc/passwd", + "/etc/shadow", + "/bin" + ] + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + APSARA_TEST_EQUAL(input->sName, "input_file_security"); nami::SecurityFileFilter thisFilter2 = std::get<nami::SecurityFileFilter>(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityProbeType::FILE, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); APSARA_TEST_EQUAL("/etc/passwd", thisFilter2.mFilePathList[0]); APSARA_TEST_EQUAL("/etc/shadow", thisFilter2.mFilePathList[1]); APSARA_TEST_EQUAL("/bin", thisFilter2.mFilePathList[2]); } -void InputEBPFFileSecurityUnittest::OnFailedInit() { - unique_ptr<InputEBPFFileSecurity> input; +void InputFileSecurityUnittest::OnFailedInit() { + unique_ptr<InputFileSecurity> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; // invalid mandatory param configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [1] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [1] + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + APSARA_TEST_EQUAL(input->sName, "input_file_security"); nami::SecurityFileFilter thisFilter = std::get<nami::SecurityFileFilter>(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); APSARA_TEST_EQUAL(0, thisFilter.mFilePathList.size()); // invalid optional param configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [ - "/etc", - 1 - ] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [ + "/etc", + 1 + ] + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + APSARA_TEST_EQUAL(input->sName, "input_file_security"); nami::SecurityFileFilter thisFilter1 = std::get<nami::SecurityFileFilter>(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityProbeType::FILE, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); APSARA_TEST_EQUAL(0, thisFilter1.mFilePathList.size()); // lose mandatory param configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - } - ] - } - )"; - APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new
InputEBPFFileSecurity()); - input->SetContext(ctx); - APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); - APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); // default callname - APSARA_TEST_EQUAL(3, input->mSecurityOptions.mOptionList[0].call_names_.size()); // default callname - - // invalid callname - configStr = R"( - { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission_error"], - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + APSARA_TEST_EQUAL(input->sName, "input_file_security"); APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); // default callname APSARA_TEST_EQUAL(3, input->mSecurityOptions.mOptionList[0].call_names_.size()); // default callname } -void InputEBPFFileSecurityUnittest::OnSuccessfulStart() { - unique_ptr<InputEBPFFileSecurity> input; +void InputFileSecurityUnittest::OnSuccessfulStart() { + unique_ptr<InputFileSecurity> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [ - "/etc", - "/bin" - ] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [ + "/etc", + "/bin" + ] + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->Start()); @@ -228,27 +205,25 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulStart() { APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); } -void InputEBPFFileSecurityUnittest::OnSuccessfulStop() { - unique_ptr<InputEBPFFileSecurity> input; +void InputFileSecurityUnittest::OnSuccessfulStop() { + unique_ptr<InputFileSecurity> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; configStr = R"( { - "Type": "input_ebpf_fileprobe_security", - "ProbeConfig": [ - { - "CallNameFilter": ["security_file_permission"], - "FilePathFilter": [ - "/etc", - "/bin" - ] - } - ] + "Type": "input_file_security", + "ProbeConfig": + { + "FilePathFilter": [ + "/etc", + "/bin" + ] + } } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFFileSecurity()); + input.reset(new InputFileSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->Start()); @@ -263,11 +238,13 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulStop() { APSARA_TEST_TRUE(serverPipelineName.empty()); } -UNIT_TEST_CASE(InputEBPFFileSecurityUnittest, OnSuccessfulInit) -UNIT_TEST_CASE(InputEBPFFileSecurityUnittest, OnFailedInit) -UNIT_TEST_CASE(InputEBPFFileSecurityUnittest, OnSuccessfulStart) -UNIT_TEST_CASE(InputEBPFFileSecurityUnittest, OnSuccessfulStop) -// UNIT_TEST_CASE(InputEBPFFileSecurityUnittest, OnPipelineUpdate) +UNIT_TEST_CASE(InputFileSecurityUnittest, TestName) +UNIT_TEST_CASE(InputFileSecurityUnittest, TestSupportAck) +UNIT_TEST_CASE(InputFileSecurityUnittest, OnSuccessfulInit)
+UNIT_TEST_CASE(InputFileSecurityUnittest, OnFailedInit) +UNIT_TEST_CASE(InputFileSecurityUnittest, OnSuccessfulStart) +UNIT_TEST_CASE(InputFileSecurityUnittest, OnSuccessfulStop) +// UNIT_TEST_CASE(InputFileSecurityUnittest, OnPipelineUpdate) } // namespace logtail diff --git a/core/unittest/input/InputEBPFNetworkObserverUnittest.cpp b/core/unittest/input/InputNetworkObserverUnittest.cpp similarity index 76% rename from core/unittest/input/InputEBPFNetworkObserverUnittest.cpp rename to core/unittest/input/InputNetworkObserverUnittest.cpp index 068c5219d7..d6b68f25f5 100644 --- a/core/unittest/input/InputEBPFNetworkObserverUnittest.cpp +++ b/core/unittest/input/InputNetworkObserverUnittest.cpp @@ -17,7 +17,7 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "ebpf/config.h" -#include "plugin/input/InputEBPFNetworkObserver.h" +#include "plugin/input/InputNetworkObserver.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include "unittest/Unittest.h" @@ -27,8 +27,10 @@ using namespace std; namespace logtail { -class InputEBPFNetworkObserverUnittest : public testing::Test { +class InputNetworkObserverUnittest : public testing::Test { public: + void TestName(); + void TestSupportAck(); void OnSuccessfulInit(); void OnFailedInit(); void OnSuccessfulStart(); @@ -48,15 +50,27 @@ class InputEBPFNetworkObserverUnittest : public testing::Test { PipelineContext ctx; }; -void InputEBPFNetworkObserverUnittest::OnSuccessfulInit() { - unique_ptr<InputEBPFNetworkObserver> input; +void InputNetworkObserverUnittest::TestName() { + InputNetworkObserver input; + std::string name = input.Name(); + APSARA_TEST_EQUAL(name, "input_network_observer"); +} + +void InputNetworkObserverUnittest::TestSupportAck() { + InputNetworkObserver input; + bool supportAck = input.SupportAck(); + APSARA_TEST_FALSE(supportAck); +} + +void InputNetworkObserverUnittest::OnSuccessfulInit() { + unique_ptr<InputNetworkObserver> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; // valid optional param configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_observer", + "Type": "input_network_observer", "ProbeConfig": { "EnableProtocols": [ @@ -69,10 +83,10 @@ void InputEBPFNetworkObserverUnittest::OnSuccessfulInit() { } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkObserver()); + input.reset(new InputNetworkObserver()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_observer"); + APSARA_TEST_EQUAL(input->sName, "input_network_observer"); nami::ObserverNetworkOption thisObserver = input->mNetworkOption; APSARA_TEST_EQUAL(thisObserver.mEnableProtocols.size(), 1); APSARA_TEST_EQUAL(thisObserver.mEnableProtocols[0], "http"); @@ -81,15 +95,15 @@ APSARA_TEST_EQUAL(false, thisObserver.mEnableConnTrackerDump); } -void InputEBPFNetworkObserverUnittest::OnFailedInit() { - unique_ptr<InputEBPFNetworkObserver> input; +void InputNetworkObserverUnittest::OnFailedInit() { + unique_ptr<InputNetworkObserver> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; // invalid optional param configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_observer", + "Type": "input_network_observer", "ProbeConfig": { "EnableProtocols": [ @@ -102,10 +116,10 @@ void InputEBPFNetworkObserverUnittest::OnFailedInit() { } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkObserver()); +
input.reset(new InputNetworkObserver()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_observer"); + APSARA_TEST_EQUAL(input->sName, "input_network_observer"); nami::ObserverNetworkOption thisObserver = input->mNetworkOption; APSARA_TEST_EQUAL(thisObserver.mEnableProtocols.size(), 1); APSARA_TEST_EQUAL(thisObserver.mEnableProtocols[0], "http"); @@ -116,7 +130,7 @@ void InputEBPFNetworkObserverUnittest::OnFailedInit() { // lag of mandatory param + error param level configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_observer", + "Type": "input_network_observer", "EnableProtocols": [ "http" ], @@ -126,20 +140,20 @@ } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkObserver()); + input.reset(new InputNetworkObserver()); input->SetContext(ctx); APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); } -void InputEBPFNetworkObserverUnittest::OnSuccessfulStart() { - unique_ptr<InputEBPFNetworkObserver> input; +void InputNetworkObserverUnittest::OnSuccessfulStart() { + unique_ptr<InputNetworkObserver> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; uint32_t pluginIdx = 0; configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_observer", + "Type": "input_network_observer", "ProbeConfig": { "EnableProtocols": [ @@ -152,7 +166,7 @@ void InputEBPFNetworkObserverUnittest::OnSuccessfulStart() { } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkObserver()); + input.reset(new InputNetworkObserver()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->Start()); @@ -161,14 +175,14 @@ APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); } -void InputEBPFNetworkObserverUnittest::OnSuccessfulStop() { - unique_ptr<InputEBPFNetworkObserver> input; +void InputNetworkObserverUnittest::OnSuccessfulStop() { + unique_ptr<InputNetworkObserver> input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; configStr = R"( { - "Type": "input_ebpf_sockettraceprobe_observer", + "Type": "input_network_observer", "ProbeConfig": { "EnableProtocols": [ @@ -181,7 +195,7 @@ void InputEBPFNetworkObserverUnittest::OnSuccessfulStop() { } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - input.reset(new InputEBPFNetworkObserver()); + input.reset(new InputNetworkObserver()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->Start()); @@ -196,11 +210,13 @@ APSARA_TEST_TRUE(serverPipelineName.empty()); } -UNIT_TEST_CASE(InputEBPFNetworkObserverUnittest, OnSuccessfulInit) -UNIT_TEST_CASE(InputEBPFNetworkObserverUnittest, OnFailedInit) -UNIT_TEST_CASE(InputEBPFNetworkObserverUnittest, OnSuccessfulStart) -UNIT_TEST_CASE(InputEBPFNetworkObserverUnittest, OnSuccessfulStop) -// UNIT_TEST_CASE(InputEBPFNetworkObserverUnittest, OnPipelineUpdate) +UNIT_TEST_CASE(InputNetworkObserverUnittest, TestName) +UNIT_TEST_CASE(InputNetworkObserverUnittest, TestSupportAck) +UNIT_TEST_CASE(InputNetworkObserverUnittest, OnSuccessfulInit) +UNIT_TEST_CASE(InputNetworkObserverUnittest, OnFailedInit) +UNIT_TEST_CASE(InputNetworkObserverUnittest, OnSuccessfulStart) +UNIT_TEST_CASE(InputNetworkObserverUnittest, OnSuccessfulStop) +//
UNIT_TEST_CASE(InputNetworkObserverUnittest, OnPipelineUpdate) } // namespace logtail diff --git a/core/unittest/input/InputNetworkSecurityUnittest.cpp b/core/unittest/input/InputNetworkSecurityUnittest.cpp new file mode 100644 index 0000000000..1a36b464b4 --- /dev/null +++ b/core/unittest/input/InputNetworkSecurityUnittest.cpp @@ -0,0 +1,255 @@ +// Copyright 2023 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "app_config/AppConfig.h" +#include "common/JsonUtil.h" +#include "ebpf/config.h" +#include "plugin/input/InputNetworkSecurity.h" +#include "pipeline/Pipeline.h" +#include "pipeline/PipelineContext.h" +#include "unittest/Unittest.h" +#include "ebpf/eBPFServer.h" + +using namespace std; + +namespace logtail { + +class InputNetworkSecurityUnittest : public testing::Test { +public: + void TestName(); + void TestSupportAck(); + void OnSuccessfulInit(); + void OnFailedInit(); + void OnSuccessfulStart(); + void OnSuccessfulStop(); + // void OnPipelineUpdate(); + +protected: + void SetUp() override { + p.mName = "test_config"; + ctx.SetConfigName("test_config"); + ctx.SetPipeline(p); + ebpf::eBPFServer::GetInstance()->Init(); + } + +private: + Pipeline p; + PipelineContext ctx; +}; + +void InputNetworkSecurityUnittest::TestName() { + InputNetworkSecurity input; + std::string name = input.Name(); + APSARA_TEST_EQUAL(name, "input_network_security"); +} + +void InputNetworkSecurityUnittest::TestSupportAck() { + InputNetworkSecurity input; + bool supportAck = input.SupportAck(); + APSARA_TEST_FALSE(supportAck); +} + +void InputNetworkSecurityUnittest::OnSuccessfulInit() { + unique_ptr<InputNetworkSecurity> input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + // valid optional param + configStr = R"( + { + "Type": "input_network_security", + "ProbeConfig": + { + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [80], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_network_security"); + nami::SecurityNetworkFilter thisFilter1 + = std::get<nami::SecurityNetworkFilter>(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter1.mDestAddrList[0]); + APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter1.mDestAddrList[1]); + APSARA_TEST_EQUAL(1, thisFilter1.mDestPortList.size()); + APSARA_TEST_EQUAL("127.0.0.1/8", thisFilter1.mSourceAddrBlackList[0]); + APSARA_TEST_EQUAL(9300, thisFilter1.mSourcePortBlackList[0]); +} + +void InputNetworkSecurityUnittest::OnFailedInit() { + unique_ptr<InputNetworkSecurity> input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + // invalid optional param + configStr = R"( + { + "Type": "input_network_security", + "ProbeConfig": + { + "AddrFilter": { + "DestAddrList":
["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": ["80"], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_network_security"); + nami::SecurityNetworkFilter thisFilter1 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter1.mDestAddrList[0]); + APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter1.mDestAddrList[1]); + APSARA_TEST_EQUAL(0, thisFilter1.mDestPortList.size()); + APSARA_TEST_EQUAL("127.0.0.1/8", thisFilter1.mSourceAddrBlackList[0]); + APSARA_TEST_EQUAL(9300, thisFilter1.mSourcePortBlackList[0]); + + // error param level + configStr = R"( + { + "Type": "input_network_security", + "ProbeConfig": + { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": ["80"], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + nami::SecurityNetworkFilter thisFilter5 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL(thisFilter5.mDestAddrList.size(), 0); + APSARA_TEST_EQUAL(thisFilter5.mDestPortList.size(), 0); + APSARA_TEST_EQUAL(thisFilter5.mSourceAddrBlackList.size(), 0); + APSARA_TEST_EQUAL(thisFilter5.mSourcePortBlackList.size(), 0); + + // valid and invalid optional param + // if the optional param in a list is invalid, the valid param after it will be read + configStr = R"( + { + "Type": "input_network_security", + "ProbeConfig": + { + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [40, "80", 160], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + nami::SecurityNetworkFilter thisFilter6 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL(2, thisFilter6.mDestAddrList.size()); +} + +void InputNetworkSecurityUnittest::OnSuccessfulStart() { + unique_ptr input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + configStr = R"( + { + "Type": "input_network_security", + "ProbeConfig": + { + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [80], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Start()); + string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); + string pipelineName = input->GetContext().GetConfigName(); + APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); +} + +void InputNetworkSecurityUnittest::OnSuccessfulStop() { + unique_ptr input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + configStr = R"( + { + 
"Type": "input_network_security", + "ProbeConfig": + { + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [80], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Start()); + string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); + string pipelineName = input->GetContext().GetConfigName(); + APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); + APSARA_TEST_TRUE(input->Stop(false)); + serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); + APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); + APSARA_TEST_TRUE(input->Stop(true)); + serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::NETWORK_SECURITY); + APSARA_TEST_TRUE(serverPipelineName.empty()); +} + +UNIT_TEST_CASE(InputNetworkSecurityUnittest, TestName) +UNIT_TEST_CASE(InputNetworkSecurityUnittest, TestSupportAck) +UNIT_TEST_CASE(InputNetworkSecurityUnittest, OnSuccessfulInit) +UNIT_TEST_CASE(InputNetworkSecurityUnittest, OnFailedInit) +UNIT_TEST_CASE(InputNetworkSecurityUnittest, OnSuccessfulStart) +UNIT_TEST_CASE(InputNetworkSecurityUnittest, OnSuccessfulStop) +// UNIT_TEST_CASE(InputNetworkSecurityUnittest, OnPipelineUpdate) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/input/InputProcessSecurityUnittest.cpp b/core/unittest/input/InputProcessSecurityUnittest.cpp new file mode 100644 index 0000000000..1658590705 --- /dev/null +++ b/core/unittest/input/InputProcessSecurityUnittest.cpp @@ -0,0 +1,140 @@ +// Copyright 2023 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
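(Editor's aside: the OnSuccessfulStop cases above and below assert a two-phase shutdown contract: `Stop(false)` leaves the pipeline name registered with the eBPF server, while `Stop(true)` releases it. A minimal Go sketch of that suspend-versus-remove bookkeeping, with all names hypothetical rather than iLogtail's actual API:)

```go
// Hypothetical sketch of the contract the tests assert: a suspend keeps
// the plugin slot owned by its pipeline, a full stop frees it.
package main

import "fmt"

type pluginType int

const networkSecurity pluginType = iota

type ebpfServer struct {
	loaded map[pluginType]string // plugin type -> owning pipeline name
}

func newServer() *ebpfServer { return &ebpfServer{loaded: map[pluginType]string{}} }

func (s *ebpfServer) start(t pluginType, pipeline string) { s.loaded[t] = pipeline }

// stop mirrors Stop(releaseResource): suspend (false) leaves the
// registration in place so a config update can re-attach; a full
// stop (true) clears it.
func (s *ebpfServer) stop(t pluginType, releaseResource bool) {
	if releaseResource {
		delete(s.loaded, t)
	}
}

func (s *ebpfServer) checkLoadedPipelineName(t pluginType) string { return s.loaded[t] }

func main() {
	s := newServer()
	s.start(networkSecurity, "test_config")
	s.stop(networkSecurity, false)
	fmt.Println(s.checkLoadedPipelineName(networkSecurity)) // "test_config": still registered
	s.stop(networkSecurity, true)
	fmt.Println(s.checkLoadedPipelineName(networkSecurity) == "") // true: released
}
```

Matching the assertions: after a suspend the server still reports the owning pipeline, and only a full stop frees the plugin slot for another config.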
+ +#include <json/json.h> + +#include "app_config/AppConfig.h" +#include "common/JsonUtil.h" +#include "ebpf/config.h" +#include "plugin/input/InputProcessSecurity.h" +#include "pipeline/Pipeline.h" +#include "pipeline/PipelineContext.h" +#include "unittest/Unittest.h" +#include "ebpf/eBPFServer.h" + +using namespace std; + +namespace logtail { + +class InputProcessSecurityUnittest : public testing::Test { +public: + void TestName(); + void TestSupportAck(); + void OnSuccessfulInit(); + void OnSuccessfulStart(); + void OnSuccessfulStop(); + // void OnPipelineUpdate(); + +protected: + void SetUp() override { + p.mName = "test_config"; + ctx.SetConfigName("test_config"); + ctx.SetPipeline(p); + ebpf::eBPFServer::GetInstance()->Init(); + } + +private: + Pipeline p; + PipelineContext ctx; +}; + +void InputProcessSecurityUnittest::TestName() { + InputProcessSecurity input; + std::string name = input.Name(); + APSARA_TEST_EQUAL(name, "input_process_security"); +} + +void InputProcessSecurityUnittest::TestSupportAck() { + InputProcessSecurity input; + bool supportAck = input.SupportAck(); + APSARA_TEST_FALSE(supportAck); +} + +void InputProcessSecurityUnittest::OnSuccessfulInit() { + unique_ptr<InputProcessSecurity> input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + // valid param + configStr = R"( + { + "Type": "input_process_security", + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_process_security"); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); + // no general filter, default is monostate + APSARA_TEST_EQUAL(std::holds_alternative<std::monostate>(input->mSecurityOptions.mOptionList[0].filter_), true); +} + +void InputProcessSecurityUnittest::OnSuccessfulStart() { + unique_ptr<InputProcessSecurity> input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + configStr = R"( + { + "Type": "input_process_security", + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Start()); + string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); + string pipelineName = input->GetContext().GetConfigName(); + APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); +} + +void InputProcessSecurityUnittest::OnSuccessfulStop() { + unique_ptr<InputProcessSecurity> input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + configStr = R"( + { + "Type": "input_process_security", + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Start()); + string serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); + string pipelineName = input->GetContext().GetConfigName(); + APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName == pipelineName); + APSARA_TEST_TRUE(input->Stop(false)); + serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); + APSARA_TEST_TRUE(serverPipelineName.size() && serverPipelineName 
== pipelineName); + APSARA_TEST_TRUE(input->Stop(true)); + serverPipelineName = ebpf::eBPFServer::GetInstance()->CheckLoadedPipelineName(nami::PluginType::PROCESS_SECURITY); + APSARA_TEST_TRUE(serverPipelineName.empty()); +} + +UNIT_TEST_CASE(InputProcessSecurityUnittest, TestName) +UNIT_TEST_CASE(InputProcessSecurityUnittest, TestSupportAck) +UNIT_TEST_CASE(InputProcessSecurityUnittest, OnSuccessfulInit) +UNIT_TEST_CASE(InputProcessSecurityUnittest, OnSuccessfulStart) +UNIT_TEST_CASE(InputProcessSecurityUnittest, OnSuccessfulStop) +// UNIT_TEST_CASE(InputProcessSecurityUnittest, OnPipelineUpdate) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/docs/cn/SUMMARY.md b/docs/cn/SUMMARY.md index d897d68602..037863731a 100644 --- a/docs/cn/SUMMARY.md +++ b/docs/cn/SUMMARY.md @@ -68,10 +68,10 @@ * [OTLP数据](plugins/input/service-otlp.md) * [PostgreSQL 查询数据](plugins/input/service-pgsql.md) * [Syslog数据](plugins/input/service-syslog.md) - * [eBPF文件安全数据](plugins/input/input-ebpf-file-security.md) - * [eBPF网络可观测数据](plugins/input/input-ebpf-network-observer.md) - * [eBPF网络安全数据](plugins/input/input-ebpf-network-security.md) - * [eBPF进程安全数据](plugins/input/input-ebpf-process-security.md) + * [文件安全数据](plugins/input/input-file-security.md) + * [网络可观测数据](plugins/input/input-network-observer.md) + * [网络安全数据](plugins/input/input-network-security.md) + * [进程安全数据](plugins/input/input-process-security.md) * [处理](plugins/processor/README.md) * [原生插件](plugins/processor/native/README.md) * [正则解析](plugins/processor/native/processor-parse-regex-native.md) diff --git a/docs/cn/configuration/system-config.md b/docs/cn/configuration/system-config.md index 96e3bf5f18..805ad2e79f 100644 --- a/docs/cn/configuration/system-config.md +++ b/docs/cn/configuration/system-config.md @@ -48,7 +48,6 @@ | 参数 | 类型 | 说明 | | ----------------------- |--------|------------------------------------------------------------------------------------------------------------| -| `USE_CONTAINERD` | Bool | 是否使用containerd runtime,非必选。ilogtail会自动通过接口探测。 | | `CONTAINERD_SOCK_PATH` | String | 自定义containerd sock路径,非必选。默认为/run/containerd/containerd.sock。自定义取值可以通过查看/etc/containerd/config.toml grpc.address字段获取。 | | `CONTAINERD_STATE_DIR` | String | 自定义containerd 数据目录,非必选。自定义取值可以通过查看/etc/containerd/config.toml state字段获取。 | | `LOGTAIL_LOG_LEVEL` | String | 用于控制/apsara/sls/ilogtail和golang插件的日志等级,支持通用日志等级,如trace, debug,info,warning,error,fatal| diff --git a/docs/cn/plugins/flusher/flusher-http.md b/docs/cn/plugins/flusher/flusher-http.md index 8bd5a88fdc..4cb319bb68 100644 --- a/docs/cn/plugins/flusher/flusher-http.md +++ b/docs/cn/plugins/flusher/flusher-http.md @@ -1,4 +1,4 @@ -# 标准输出/文件 +# HTTP ## 简介 @@ -21,6 +21,9 @@ | Retry.MaxRetryTimes | Int | 否 | 最大重试次数,默认为 `3` | | Retry.InitialDelay | String | 否 | 首次重试时间间隔,默认为 `1s`,重试间隔以会2的倍数递增 | | Retry.MaxDelay | String | 否 | 最大重试时间间隔,默认为 `30s` | +| Encoder | Struct | 否 | ilogtail数据编码协议配置,优先级高于Convert | +| Encoder.Type | String | 否 | encoder 插件的类型 | +| Encoder.Options | Map | 否 | encoder 插件的配置 | | Convert | Struct | 否 | ilogtail数据转换协议配置 | | Convert.Protocol | String | 否 | ilogtail数据转换协议,可选值:`custom_single`,`influxdb`, `jsonline`。默认值:`custom_single`
v2版本可选值:`raw`
| | Convert.Encoding | String | 否 | ilogtail flusher数据转换编码,可选值:`json`, `custom`,默认值:`json` | @@ -34,6 +37,9 @@ | IdleConnTimeout | String | 否 | HTTP连接在关闭前保持闲置状态的最长时间,默认`90s`
当其值大于http.DefaultTransport.(*http.Transport).IdleConnTimeout时(当前是`90s`),会采用该值 | | WriteBufferSize | Int | 否 | 写缓冲区的大小,不填不会给http.DefaultTransport.(*http.Transport).WriteBufferSize赋值,此时采用默认的`4KB`
当其值大于0时,会采用该值 | | QueueCapacity | Int | 否 | 内部channel的缓存大小,默认为1024 +| Authenticator | Struct | 否 | 鉴权扩展插件配置 | +| Authenticator.Type | String | 否 | 鉴权扩展插件类型 | +| Authenticator.Options | Map | 否 | 鉴权扩展插件配置内容 | | AsyncIntercept | Boolean | 否 | 异步过滤数据,默认为否 | DropEventWhenQueueFull | Boolean | 否 | 当队列满时是否丢弃数据,否则需要等待,默认为不丢弃 | | Compression | string | 否 | 压缩策略,目前支持gzip和snappy,默认不开启 | @@ -107,6 +113,7 @@ flushers: SeriesLimit: 1024 Authenticator: Type: ext_basicauth + Compression: 'snappy' extensions: - Type: ext_basicauth Username: 'YOUR_USERNAME' diff --git a/docs/cn/plugins/flusher/flusher-prometheus.md b/docs/cn/plugins/flusher/flusher-prometheus.md new file mode 100644 index 0000000000..9265c9d1b7 --- /dev/null +++ b/docs/cn/plugins/flusher/flusher-prometheus.md @@ -0,0 +1,61 @@ +# Prometheus + +## 简介 + +`flusher_prometheus` `flusher`插件可以实现将采集到的数据,经过处理后,通过http格式发送到指定的 Prometheus RemoteWrite 地址。 +参数配置大部分继承`flusher_http`,详见[flusher_http](flusher-http.md)。 + +## 版本 + +[Alpha](../stability-level.md) + +## 配置参数 + +| 参数 | 类型 | 是否必选 | 说明 | +|------------------------|---------------------|------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Type | String | 是 | 插件类型,固定为`flusher_prometheus` | +| Endpoint | String | 是 | 要发送到的URL地址,遵从Prometheus RemoteWrite协议,示例:`http://localhost:8086/api/v1/write` | +| SeriesLimit | Int | 否 | 一次序列化 Prometheus RemoteWrite 请求的时间序列的最大长度,默认1000 | +| Headers | Map | 否 | 发送时附加的http请求header,如可添加 Authorization、Content-Type等信息,支持动态变量写法,如`{"x-db":"%{tag.db}"}`
v2版本支持从Group的Metadata或者Group.Tags中获取动态变量,如`{"x-db":"%{metadata.db}"}`或者`{"x-db":"%{tag.db}"}`
默认注入prometheus相关的Header(e.g. snappy压缩)
| +| Query | Map | 否 | 发送时附加到url上的query参数,支持动态变量写法,如`{"db":"%{tag.db}"}`
v2版本支持从Group的Metadata或者Group.Tags中获取动态变量,如`{"db":"%{metadata.db}"}`或者`{"db":"%{tag.db}"}`
| +| Timeout | String | 否 | 请求的超时时间,默认 `60s` | +| Retry.Enable | Boolean | 否 | 是否开启失败重试,默认为 `true` | +| Retry.MaxRetryTimes | Int | 否 | 最大重试次数,默认为 `3` | +| Retry.InitialDelay | String | 否 | 首次重试时间间隔,默认为 `1s`,重试间隔会以2的倍数递增 | +| Retry.MaxDelay | String | 否 | 最大重试时间间隔,默认为 `30s` | +| Concurrency | Int | 否 | 向url发起请求的并发数,默认为`1` | +| MaxConnsPerHost | Int | 否 | 每个host上的最大HTTP连接数(包含了拨号阶段的、活跃的、空闲的),默认`50` | +| MaxIdleConnsPerHost | Int | 否 | 每个host上的最大空闲的HTTP连接数,默认`50` | +| IdleConnTimeout | String | 否 | HTTP连接在关闭前保持闲置状态的最长时间,默认`90s`
当其值大于http.DefaultTransport.(*http.Transport).IdleConnTimeout时(当前是`90s`),会采用该值 | +| WriteBufferSize | Int | 否 | 写缓冲区的大小,默认`64KB` | +| QueueCapacity | Int | 否 | 内部channel的缓存大小,默认为1024 +| Authenticator | Struct | 否 | 鉴权扩展插件配置 | +| Authenticator.Type | String | 否 | 鉴权扩展插件类型 | +| Authenticator.Options | Map | 否 | 鉴权扩展插件配置内容 | +| AsyncIntercept | Boolean | 否 | 异步过滤数据,默认为否 +| DropEventWhenQueueFull | Boolean | 否 | 当队列满时是否丢弃数据,否则需要等待,默认为丢弃 | + +## 样例 + +采集Prometheus指标,并将指标以Prometheus协议发送到`PROMETHEUS_REMOTEWRITE_ADDRESS`。 +这里用到了`ext_default_encoder`插件(默认集成,无需用户手动配置),该插件可以配置使用Prometheus Encoder,从而支持将采集到的数据转换为Prometheus协议。 +```yaml +enable: true +global: + StructureType: v2 +inputs: +- Type: service_prometheus + ConfigFilePath: '/etc/prometheus/prometheus.yml' +flushers: +- Type: flusher_prometheus + Endpoint: 'http://PROMETHEUS_REMOTEWRITE_ADDRESS/api/v1/write' + Concurrency: 10 + QueueCapacity: 4096 + DropEventWhenQueueFull: true + Authenticator: + Type: ext_basicauth +extensions: +- Type: ext_basicauth + Username: 'YOUR_USERNAME' + Password: 'YOUR_PASSWORD' +``` \ No newline at end of file diff --git a/docs/cn/plugins/input/input-ebpf-file-security.md b/docs/cn/plugins/input/input-ebpf-file-security.md deleted file mode 100644 index 9c3045282f..0000000000 --- a/docs/cn/plugins/input/input-ebpf-file-security.md +++ /dev/null @@ -1,56 +0,0 @@ -# input_ebpf_file_security 插件 - -## 简介 - -`input_ebpf_file_security`插件可以实现利用ebpf探针采集文件安全相关动作。 - -## 版本 - -[Dev](../stability-level.md) - -## 配置参数 - -| **参数** | **类型** | **是否必填** | **默认值** | **说明** | -| --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_file\_security | -| ProbeConfig | \[object\] | 否 | ProbeConfig 默认包含一个 Option,其中包含一个默认取全部值的 CallNameFilter,其他 Filter 默认为空 | ProbeConfig 可以包含多个 Option, Option 内部有多个 Filter,Filter 内部是或的关系,Filter 之间是且的关系,Option 之间是或的关系 | -| ProbeConfig[xx].CallNameFilter | \[string\] | 否 | 该插件支持的所有 callname: [ security_file_permission security_mmap_file security_path_truncate ] | 内核挂载点过滤器,按照白名单模式运行,不填表示配置该插件所支持的所有挂载点 | -| ProbeConfig[xx].FilePathFilter | \[string\] | 否 | 空 | 文件路径过滤器,按照白名单模式运行,不填表示不进行过滤 | - -## 样例 - -### XXXX - -* 输入 - -```json -TODO -``` - -* 采集配置 - -```yaml -enable: true -inputs: - - Type: input_ebpf_fileprobe_security - ProbeConfig: - - CallNameFilter: - - "security_file_permission" - FilePathFilter: - - "/etc/passwd" - - "/lib" - - CallNameFilter: - - "security_path_truncate" - FilePathFilter: - - "/etc/passwd" -flushers: - - Type: flusher_stdout - OnlyStdout: true - Tags: true -``` - -* 输出 - -```json -TODO -``` diff --git a/docs/cn/plugins/input/input-ebpf-process-security.md b/docs/cn/plugins/input/input-ebpf-process-security.md deleted file mode 100644 index 5421eac161..0000000000 --- a/docs/cn/plugins/input/input-ebpf-process-security.md +++ /dev/null @@ -1,45 +0,0 @@ -# input_ebpf_process_security 插件 - -## 简介 - -`input_ebpf_process_security`插件可以实现利用ebpf探针采集进程安全相关动作。 - -## 版本 - -[Dev](../stability-level.md) - -## 配置参数 - -| **参数** | **类型** | **是否必填** | **默认值** | **说明** | -| --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_process\_security | -| ProbeConfig | \[object\] | 否 | ProbeConfig 默认包含一个 Option,其中包含一个默认取全部值的 CallNameFilter,其他 Filter 默认为空 | ProbeConfig 可以包含多个 Option, Option 内部有多个 Filter,Filter 内部是或的关系,Filter 之间是且的关系,Option 之间是或的关系 | -| ProbeConfig[xx].CallNameFilter | \[string\] | 否 | 该插件支持的所有 callname: [ sys_enter_execve sys_enter_clone disassociate_ctty acct_process wake_up_new_task ] | 内核挂载点过滤器,按照白名单模式运行,不填表示配置该插件所支持的所有挂载点 | - -## 样例 - -### XXXX - 
-* 输入 - -```json -TODO -``` - -* 采集配置 - -```yaml -enable: true -inputs: - - Type: input_ebpf_processprobe_security -flushers: - - Type: flusher_stdout - OnlyStdout: true - Tags: true -``` - -* 输出 - -```json -TODO -``` diff --git a/docs/cn/plugins/input/input-file-security.md b/docs/cn/plugins/input/input-file-security.md new file mode 100644 index 0000000000..878cfe0e51 --- /dev/null +++ b/docs/cn/plugins/input/input-file-security.md @@ -0,0 +1,49 @@ +# input_file_security 插件 + +## 简介 + +`input_file_security`插件可以实现利用ebpf探针采集文件安全相关动作。 + +## 版本 + +[Dev](../stability-level.md) + +## 配置参数 + +| **参数** | **类型** | **是否必填** | **默认值** | **说明** | +| --- | --- | --- | --- | --- | +| Type | string | 是 | / | 插件类型。固定为input\_file\_security | +| ProbeConfig | object | 否 | ProbeConfig 包含默认为空的 Filter | ProbeConfig 内部包含 Filter,Filter 内部是或的关系 | +| ProbeConfig[xx].FilePathFilter | \[string\] | 否 | 空 | 文件路径过滤器,按照白名单模式运行,不填表示不进行过滤 | + +## 样例 + +### XXXX + +* 输入 + +```json +TODO +``` + +* 采集配置 + +```yaml +enable: true +inputs: + - Type: input_file_security + ProbeConfig: + FilePathFilter: + - "/etc/passwd" + - "/lib" +flushers: + - Type: flusher_stdout + OnlyStdout: true + Tags: true +``` + +* 输出 + +```json +TODO +``` diff --git a/docs/cn/plugins/input/input-ebpf-network-observer.md b/docs/cn/plugins/input/input-network-observer.md similarity index 86% rename from docs/cn/plugins/input/input-ebpf-network-observer.md rename to docs/cn/plugins/input/input-network-observer.md index f3c9f845aa..15c0b7ab85 100644 --- a/docs/cn/plugins/input/input-ebpf-network-observer.md +++ b/docs/cn/plugins/input/input-network-observer.md @@ -1,8 +1,8 @@ -# input_ebpf_network_observer 插件 +# input_network_observer 插件 ## 简介 -`input_ebpf_network_observer`插件可以实现利用ebpf探针采集网络可观测数据。 +`input_network_observer`插件可以实现利用ebpf探针采集网络可观测数据。 ## 版本 @@ -12,7 +12,7 @@ | **参数** | **类型** | **是否必填** | **默认值** | **说明** | | --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_network\_observer | +| Type | string | 是 | / | 插件类型。固定为input\_network\_observer | | ProbeConfig | object | 是 | / | 插件配置参数列表 | | ProbeConfig.EnableLog | bool | 否 | true | 是否开启日志上报 | | ProbeConfig.EnableSpan | bool | 否 | false | 是否开启跨度上报 | @@ -37,7 +37,7 @@ TODO ```yaml enable: true inputs: - - Type: input_ebpf_sockettraceprobe_observer + - Type: input_network_observer ProbeConfig: EnableLog: true EnableMetric: false diff --git a/docs/cn/plugins/input/input-ebpf-network-security.md b/docs/cn/plugins/input/input-network-security.md similarity index 55% rename from docs/cn/plugins/input/input-ebpf-network-security.md rename to docs/cn/plugins/input/input-network-security.md index 64d95d31fa..ad45ba1bbd 100644 --- a/docs/cn/plugins/input/input-ebpf-network-security.md +++ b/docs/cn/plugins/input/input-network-security.md @@ -1,8 +1,8 @@ -# input_ebpf_network_security 插件 +# input_network_security 插件 ## 简介 -`input_ebpf_network_security`插件可以实现利用ebpf探针采集网络安全相关动作。 +`input_network_security`插件可以实现利用ebpf探针采集网络安全相关动作。 ## 版本 @@ -12,9 +12,8 @@ | **参数** | **类型** | **是否必填** | **默认值** | **说明** | | --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_network\_security | -| ProbeConfig | \[object\] | 否 | ProbeConfig 默认包含一个 Option,其中包含一个默认取全部值的 CallNameFilter,其他 Filter 默认为空 | ProbeConfig 可以包含多个 Option, Option 内部有多个 Filter,Filter 内部是或的关系,Filter 之间是且的关系,Option 之间是或的关系 | -| ProbeConfig[xx].CallNameFilter | \[string\] | 否 | 该插件支持的所有 callname: [ tcp_connect tcp_close tcp_sendmsg ] | 内核挂载点过滤器,按照白名单模式运行,不填表示配置该插件所支持的所有挂载点 | +| Type | string | 是 | / | 
插件类型。固定为input\_network\_security | +| ProbeConfig | object | 否 | ProbeConfig 包含默认为空的 Filter | ProbeConfig 内部包含 Filter,Filter 内部是或的关系 | | ProbeConfig[xx].AddrFilter | object | 否 | / | 网络地址过滤器 | | ProbeConfig[xx].AddrFilter.DestAddrList | \[string\] | 否 | 空 | 目的IP地址白名单,不填表示不进行过滤 | | ProbeConfig[xx].AddrFilter.DestPortList | \[string\] | 否 | 空 | 目的端口白名单,不填表示不进行过滤 | @@ -40,29 +39,18 @@ TODO ```yaml enable: true inputs: - - Type: input_ebpf_sockettraceprobe_security + - Type: input_network_security ProbeConfig: - - CallNameFilter: - - "tcp_connect" - - "tcp_close" - AddrFilter: - DestAddrList: - - "10.0.0.0/8" - - "92.168.0.0/16" - DestPortList: - - 80 - SourceAddrBlackList: - - "127.0.0.1/8" - SourcePortBlackList: - - 9300 - - CallNameFilter: - - "tcp_sendmsg" - AddrFilter: - DestAddrList: - - "10.0.0.0/8" - - "92.168.0.0/16" - DestPortList: - - 80 + AddrFilter: + DestAddrList: + - "10.0.0.0/8" + - "92.168.0.0/16" + DestPortList: + - 80 + SourceAddrBlackList: + - "127.0.0.1/8" + SourcePortBlackList: + - 9300 flushers: - Type: flusher_stdout OnlyStdout: true diff --git a/docs/cn/plugins/input/input-process-security.md b/docs/cn/plugins/input/input-process-security.md new file mode 100644 index 0000000000..955eac3042 --- /dev/null +++ b/docs/cn/plugins/input/input-process-security.md @@ -0,0 +1,43 @@ +# input_process_security 插件 + +## 简介 + +`input_process_security`插件可以实现利用ebpf探针采集进程安全相关动作。 + +## 版本 + +[Dev](../stability-level.md) + +## 配置参数 + +| **参数** | **类型** | **是否必填** | **默认值** | **说明** | +| --- | --- | --- | --- | --- | +| Type | string | 是 | / | 插件类型。固定为input\_process\_security | + +## 样例 + +### XXXX + +* 输入 + +```json +TODO +``` + +* 采集配置 + +```yaml +enable: true +inputs: + - Type: input_process_security +flushers: + - Type: flusher_stdout + OnlyStdout: true + Tags: true +``` + +* 输出 + +```json +TODO +``` diff --git a/docs/cn/plugins/input/service-kubernetesmeta-v2.md b/docs/cn/plugins/input/service-kubernetesmeta-v2.md index b11634fa38..df4c4589ab 100644 --- a/docs/cn/plugins/input/service-kubernetesmeta-v2.md +++ b/docs/cn/plugins/input/service-kubernetesmeta-v2.md @@ -15,6 +15,7 @@ | 参数 | 类型,默认值 | 说明 | | - | - | - | | Type | String,无默认值(必填) | 插件类型,固定为`service_syslog`。 | +| Domain | String,默认值为空(必填) | 实体域,ACK集群填写"acs", 普通集群填写"infra"。 | | Interval | int, 30 | 采集间隔时间,单位为秒。 | | Pod | bool, false | 是否采集Pod元数据。 | | Node | bool, false | 是否采集Node元数据。 | @@ -31,8 +32,6 @@ | PersistentVolumeClaim | bool, false | 是否采集PersistentVolumeClaim元数据。 | | StorageClass | bool, false | 是否采集StorageClass元数据。 | | Ingress | bool, false | 是否采集Ingress元数据。 | -| PodReplicasetLink | bool, false | 是否采集Pod与ReplicaSet之间的关系。 | -| PodServiceLink | bool, false | 是否采集Pod与Service之间的关系。 | ## 环境变量 diff --git a/docs/cn/plugins/overview.md b/docs/cn/plugins/overview.md index c0ccf6a220..55b119b3d9 100644 --- a/docs/cn/plugins/overview.md +++ b/docs/cn/plugins/overview.md @@ -26,10 +26,10 @@ | [`service_otlp`](input/service-otlp.md)
OTLP数据 | 社区
[`Zhu Shunjia`](https://github.com/shunjiazhu) | 通过http/grpc协议,接收OTLP数据。 | | [`service_pgsql`](input/service-pgsql.md)
PostgreSQL查询数据 | SLS官方 | 将PostgresSQL数据输入到iLogtail。 | | [`service_syslog`](input/service-syslog.md)
Syslog数据 | SLS官方 | 采集syslog数据。 | -| [`input_ebpf_file_security`](input/input-ebpf-file-security.md)
eBPF文件安全数据 | SLS官方 | eBPF文件安全数据采集。 | -| [`input_ebpf_network_observer`](input/input-ebpf-network-observer.md)
eBPF网络可观测数据 | SLS官方 | eBPF网络可观测数据采集。 | -| [`input_ebpf_network_security`](input/input-ebpf-network-security.md)
eBPF网络安全数据 | SLS官方 | eBPF网络安全数据采集。 | -| [`input_ebpf_process_security`](input/input-ebpf-process-security.md)
eBPF进程安全数据 | SLS官方 | eBPF进程安全数据采集。 | +| [`input_file_security`](input/input-file-security.md)
文件安全数据 | SLS官方 | 文件安全数据采集。 | +| [`input_network_observer`](input/input-network-observer.md)
网络可观测数据 | SLS官方 | 网络可观测数据采集。 | +| [`input_network_security`](input/input-network-security.md)
网络安全数据 | SLS官方 | 网络安全数据采集。 | +| [`input_process_security`](input/input-process-security.md)
进程安全数据 | SLS官方 | 进程安全数据采集。 | ## 处理 diff --git a/go.mod b/go.mod index 0febacd384..097ee40769 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( github.com/elastic/go-lumber v0.1.0 github.com/go-mysql-org/go-mysql v1.8.0 github.com/go-ping/ping v0.0.0-20211130115550-779d1e919534 + github.com/go-playground/validator/v10 v10.22.0 github.com/go-sql-driver/mysql v1.7.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 @@ -45,6 +46,7 @@ require ( github.com/prometheus/client_golang v1.14.0 github.com/prometheus/common v0.42.0 github.com/prometheus/procfs v0.8.0 + github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 github.com/pyroscope-io/pyroscope v1.5.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sirupsen/logrus v1.8.1 @@ -124,6 +126,7 @@ require ( github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/frankban/quicktest v1.14.5 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.6.1 // indirect github.com/go-kit/kit v0.12.0 // indirect @@ -134,6 +137,8 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.21.1 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect @@ -178,6 +183,7 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/compress v1.17.7 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/linkedin/goavro/v2 v2.9.8 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect @@ -208,7 +214,6 @@ require ( github.com/pingcap/tidb/pkg/parser v0.0.0-20231103042308-035ad5ccbe67 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 // indirect github.com/pyroscope-io/jfr-parser v0.6.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect diff --git a/go.sum b/go.sum index c223237b97..14c90e75cf 100644 --- a/go.sum +++ b/go.sum @@ -535,6 +535,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= @@ -613,9 +615,16 @@ github.com/go-openapi/validate 
v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUri github.com/go-ping/ping v0.0.0-20211130115550-779d1e919534 h1:dhy9OQKGBh4zVXbjwbxxHjRxMJtLXj3zfgpBYQaR4Q4= github.com/go-ping/ping v0.0.0-20211130115550-779d1e919534/go.mod h1:xIFjORFzTxqIV/tDVGO4eDy/bLuSyawEeojSm3GfRGk= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= +github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -1048,6 +1057,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= diff --git a/pkg/flags/flags.go b/pkg/flags/flags.go index 044ac557c6..edb3a1e6f2 100644 --- a/pkg/flags/flags.go +++ b/pkg/flags/flags.go @@ -16,63 +16,108 @@ package flags import ( "context" + "encoding/json" "flag" + "os" + "sync" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/util" ) -var K8sFlag = flag.Bool("ALICLOUD_LOG_K8S_FLAG", false, "alibaba log k8s event config flag, set true if you want to use it") - -// DockerConfigInitFlag is the alibaba log docker env config flag, set yes if you want to use it. And it is also a special flag to control enable go part in ilogtail. If you just want to -// enable logtail plugin and off the env config, set the env called ALICLOUD_LOG_PLUGIN_ENV_CONFIG with false. 
-var DockerConfigInitFlag = flag.Bool("ALICLOUD_LOG_DOCKER_ENV_CONFIG", false, "alibaba log docker env config flag, set true if you want to use it") -var DockerConfigPluginInitFlag = flag.Bool("ALICLOUD_LOG_PLUGIN_ENV_CONFIG", true, "alibaba log docker env config flag, set true if you want to use it") - -// AliCloudECSFlag set true if your docker is on alicloud ECS, so we can use ECS meta -var AliCloudECSFlag = flag.Bool("ALICLOUD_LOG_ECS_FLAG", false, "set true if your docker is on alicloud ECS, so we can use ECS meta") - -// DockerConfigPrefix docker env config prefix -var DockerConfigPrefix = flag.String("ALICLOUD_LOG_DOCKER_CONFIG_PREFIX", "aliyun_logs_", "docker env config prefix") - -// LogServiceEndpoint default project to create config -// https://www.alibabacloud.com/help/doc-detail/29008.htm -var LogServiceEndpoint = flag.String("ALICLOUD_LOG_ENDPOINT", "cn-hangzhou.log.aliyuncs.com", "log service endpoint of your project's region") - -// DefaultLogProject default project to create config -var DefaultLogProject = flag.String("ALICLOUD_LOG_DEFAULT_PROJECT", "", "default project to create config") - -// DefaultLogMachineGroup default project to create config -var DefaultLogMachineGroup = flag.String("ALICLOUD_LOG_DEFAULT_MACHINE_GROUP", "", "default project to create config") - -// LogResourceCacheExpireSec log service's resources cache expire seconds -var LogResourceCacheExpireSec = flag.Int("ALICLOUD_LOG_CACHE_EXPIRE_SEC", 600, "log service's resources cache expire seconds") - -// LogOperationMaxRetryTimes log service's operation max retry times -var LogOperationMaxRetryTimes = flag.Int("ALICLOUD_LOG_OPERATION_MAX_TRY", 3, "log service's operation max retry times") - -// DefaultAccessKeyID your log service's access key id -var DefaultAccessKeyID = flag.String("ALICLOUD_LOG_ACCESS_KEY_ID", "xxxxxxxxx", "your log service's access key id") - -// DefaultAccessKeySecret your log service's access key secret -var DefaultAccessKeySecret = flag.String("ALICLOUD_LOG_ACCESS_KEY_SECRET", "xxxxxxxxx", "your log service's access key secret") - -// DefaultSTSToken your sts token -var DefaultSTSToken = flag.String("ALICLOUD_LOG_STS_TOKEN", "", "set sts token if you use sts") - -// LogConfigPrefix config prefix -var LogConfigPrefix = flag.String("ALICLOUD_LOG_CONFIG_PREFIX", "aliyun_logs_", "config prefix") - -// DockerEnvUpdateInterval docker env config update interval seconds -var DockerEnvUpdateInterval = flag.Int("ALICLOUD_LOG_ENV_CONFIG_UPDATE_INTERVAL", 10, "docker env config update interval seconds") +const ( + DeployDaemonset = "daemonset" + DeployStatefulSet = "statefulset" + DeploySingleton = "singleton" +) -// ProductAPIDomain product domain -var ProductAPIDomain = flag.String("ALICLOUD_LOG_PRODUCT_DOMAIN", "sls.aliyuncs.com", "product domain config") +const ( + DefaultGlobalConfig = `{"InputIntervalMs":5000,"AggregatIntervalMs":30,"FlushIntervalMs":30,"DefaultLogQueueSize":11,"DefaultLogGroupQueueSize":12}` + DefaultPluginConfig = `{"inputs":[{"type":"metric_mock","detail":{"Tags":{"tag1":"aaaa","tag2":"bbb"},"Fields":{"content":"xxxxx","time":"2017.09.12 20:55:36"}}}],"flushers":[{"type":"flusher_stdout"}]}` + DefaultFlusherConfig = `{"type":"flusher_sls","detail":{}}` +) -// DefaultRegion default log region" -var DefaultRegion = flag.String("ALICLOUD_LOG_REGION", "", "default log region") +var ( + flusherType string + flusherCfg map[string]interface{} + flusherLoadOnce sync.Once +) -var SelfEnvConfigFlag bool +// flags used to control ilogtail. 
+var ( + K8sFlag = flag.Bool("ALICLOUD_LOG_K8S_FLAG", false, "alibaba log k8s event config flag, set true if you want to use it") + // DockerConfigInitFlag is the alibaba log docker env config flag, set yes if you want to use it. And it is also a special flag to control enable go part in ilogtail. If you just want to + // enable logtail plugin and off the env config, set the env called ALICLOUD_LOG_PLUGIN_ENV_CONFIG with false. + DockerConfigInitFlag = flag.Bool("ALICLOUD_LOG_DOCKER_ENV_CONFIG", false, "alibaba log docker env config flag, set true if you want to use it") + DockerConfigPluginInitFlag = flag.Bool("ALICLOUD_LOG_PLUGIN_ENV_CONFIG", true, "alibaba log docker env config flag, set true if you want to use it") + // AliCloudECSFlag set true if your docker is on alicloud ECS, so we can use ECS meta + AliCloudECSFlag = flag.Bool("ALICLOUD_LOG_ECS_FLAG", false, "set true if your docker is on alicloud ECS, so we can use ECS meta") + + // DockerConfigPrefix docker env config prefix + DockerConfigPrefix = flag.String("ALICLOUD_LOG_DOCKER_CONFIG_PREFIX", "aliyun_logs_", "docker env config prefix") + + // LogServiceEndpoint default project to create config + // https://www.alibabacloud.com/help/doc-detail/29008.htm + LogServiceEndpoint = flag.String("ALICLOUD_LOG_ENDPOINT", "cn-hangzhou.log.aliyuncs.com", "log service endpoint of your project's region") + + // DefaultLogProject default project to create config + DefaultLogProject = flag.String("ALICLOUD_LOG_DEFAULT_PROJECT", "", "default project to create config") + + // DefaultLogMachineGroup default project to create config + DefaultLogMachineGroup = flag.String("ALICLOUD_LOG_DEFAULT_MACHINE_GROUP", "", "default project to create config") + + // LogResourceCacheExpireSec log service's resources cache expire seconds + LogResourceCacheExpireSec = flag.Int("ALICLOUD_LOG_CACHE_EXPIRE_SEC", 600, "log service's resources cache expire seconds") + + // LogOperationMaxRetryTimes log service's operation max retry times + LogOperationMaxRetryTimes = flag.Int("ALICLOUD_LOG_OPERATION_MAX_TRY", 3, "log service's operation max retry times") + + // DefaultAccessKeyID your log service's access key id + DefaultAccessKeyID = flag.String("ALICLOUD_LOG_ACCESS_KEY_ID", "xxxxxxxxx", "your log service's access key id") + + // DefaultAccessKeySecret your log service's access key secret + DefaultAccessKeySecret = flag.String("ALICLOUD_LOG_ACCESS_KEY_SECRET", "xxxxxxxxx", "your log service's access key secret") + + // DefaultSTSToken your sts token + DefaultSTSToken = flag.String("ALICLOUD_LOG_STS_TOKEN", "", "set sts token if you use sts") + + // LogConfigPrefix config prefix + LogConfigPrefix = flag.String("ALICLOUD_LOG_CONFIG_PREFIX", "aliyun_logs_", "config prefix") + + // DockerEnvUpdateInterval docker env config update interval seconds + DockerEnvUpdateInterval = flag.Int("ALICLOUD_LOG_ENV_CONFIG_UPDATE_INTERVAL", 10, "docker env config update interval seconds") + + // ProductAPIDomain product domain + ProductAPIDomain = flag.String("ALICLOUD_LOG_PRODUCT_DOMAIN", "sls.aliyuncs.com", "product domain config") + + // DefaultRegion default log region" + DefaultRegion = flag.String("ALICLOUD_LOG_REGION", "", "default log region") + + SelfEnvConfigFlag bool + + GlobalConfig = flag.String("global", "./global.json", "global config.") + PluginConfig = flag.String("plugin", "./plugin.json", "plugin config.") + FlusherConfig = flag.String("flusher", "./default_flusher.json", "the default flusher configuration is used not only in the plugins without flusher but also to 
transfer the self telemetry data.") + ForceSelfCollect = flag.Bool("force-statics", false, "force collect self telemetry data before closing.") + AutoProfile = flag.Bool("prof-auto", true, "auto dump prof file when prof-flag is open.") + HTTPProfFlag = flag.Bool("prof-flag", false, "http pprof flag.") + Cpuprofile = flag.String("cpu-profile", "cpu.prof", "write cpu profile to file.") + Memprofile = flag.String("mem-profile", "mem.prof", "write mem profile to file.") + HTTPAddr = flag.String("server", ":18689", "http server address.") + Doc = flag.Bool("doc", false, "generate plugin docs") + DocPath = flag.String("docpath", "./docs/en/plugins", "generate plugin docs") + HTTPLoadFlag = flag.Bool("http-load", false, "export http endpoint for load plugin config.") + FileIOFlag = flag.Bool("file-io", false, "use file for input or output.") + InputFile = flag.String("input-file", "./input.log", "input file") + InputField = flag.String("input-field", "content", "input file") + InputLineLimit = flag.Int("input-line-limit", 1000, "input file") + OutputFile = flag.String("output-file", "./output.log", "output file") + StatefulSetFlag = flag.Bool("ALICLOUD_LOG_STATEFULSET_FLAG", false, "alibaba log export ports flag, set true if you want to use it") + + DeployMode = flag.String("DEPLOY_MODE", DeployDaemonset, "alibaba log deploy mode, daemonset or statefulset or singleton") + EnableKubernetesMeta = flag.Bool("ENABLE_KUBERNETES_META", false, "enable kubernetes meta") + ClusterID = flag.String("GLOBAL_CLUSTER_ID", "", "cluster id") +) func init() { _ = util.InitFromEnvBool("ALICLOUD_LOG_K8S_FLAG", K8sFlag, *K8sFlag) @@ -90,6 +135,16 @@ func init() { _ = util.InitFromEnvString("ALICLOUD_LOG_REGION", DefaultRegion, *DefaultRegion) _ = util.InitFromEnvBool("ALICLOUD_LOG_PLUGIN_ENV_CONFIG", DockerConfigPluginInitFlag, *DockerConfigPluginInitFlag) + _ = util.InitFromEnvBool("LOGTAIL_DEBUG_FLAG", HTTPProfFlag, *HTTPProfFlag) + _ = util.InitFromEnvBool("LOGTAIL_AUTO_PROF", AutoProfile, *AutoProfile) + _ = util.InitFromEnvBool("LOGTAIL_FORCE_COLLECT_SELF_TELEMETRY", ForceSelfCollect, *ForceSelfCollect) + _ = util.InitFromEnvBool("LOGTAIL_HTTP_LOAD_CONFIG", HTTPLoadFlag, *HTTPLoadFlag) + _ = util.InitFromEnvBool("ALICLOUD_LOG_STATEFULSET_FLAG", StatefulSetFlag, *StatefulSetFlag) + + _ = util.InitFromEnvString("DEPLOY_MODE", DeployMode, *DeployMode) + _ = util.InitFromEnvBool("ENABLE_KUBERNETES_META", EnableKubernetesMeta, *EnableKubernetesMeta) + _ = util.InitFromEnvString("GLOBAL_CLUSTER_ID", ClusterID, *ClusterID) + if len(*DefaultRegion) == 0 { *DefaultRegion = util.GuessRegionByEndpoint(*LogServiceEndpoint, "cn-hangzhou") logger.Info(context.Background(), "guess region by endpoint, endpoint", *LogServiceEndpoint, "region", *DefaultRegion) @@ -101,3 +156,39 @@ func init() { _ = util.InitFromEnvBool("ALICLOUD_LOG_DOCKER_ENV_CONFIG_SELF", &SelfEnvConfigFlag, false) } } + +// GetFlusherConfiguration returns the flusher category and options. 
+func GetFlusherConfiguration() (flusherCategory string, flusherOptions map[string]interface{}) { + flusherLoadOnce.Do(func() { + extract := func(cfg []byte) (string, map[string]interface{}, bool) { + m := make(map[string]interface{}) + err := json.Unmarshal(cfg, &m) + if err != nil { + logger.Error(context.Background(), "DEFAULT_FLUSHER_ALARM", "err", err) + return "", nil, false + } + c, ok := m["type"].(string) + if !ok { + return "", nil, false + } + options, ok := m["detail"].(map[string]interface{}) + if !ok { + return c, nil, true + } + return c, options, true + } + if fCfg, err := os.ReadFile(*FlusherConfig); err == nil { + category, options, ok := extract(fCfg) + if ok { + flusherType = category + flusherCfg = options + } else { + flusherType, flusherCfg, _ = extract([]byte(DefaultFlusherConfig)) + } + } else { + flusherType, flusherCfg, _ = extract([]byte(DefaultFlusherConfig)) + } + + }) + return flusherType, flusherCfg +} diff --git a/pkg/helper/container_discover_controller.go b/pkg/helper/container_discover_controller.go index c06eb72947..2a8604df7b 100644 --- a/pkg/helper/container_discover_controller.go +++ b/pkg/helper/container_discover_controller.go @@ -184,12 +184,7 @@ func (c *ContainerDiscoverManager) Init() bool { c.enableCRIDiscover = criRuntimeWrapper != nil c.enableDockerDiscover = dockerCenterInstance.initClient() == nil c.enableStaticDiscover = isStaticContainerInfoEnabled() - discoverdRuntime := false - if len(os.Getenv("USE_CONTAINERD")) > 0 { - discoverdRuntime = c.enableCRIDiscover - } else { - discoverdRuntime = c.enableCRIDiscover || c.enableDockerDiscover || c.enableStaticDiscover - } + discoverdRuntime := c.enableCRIDiscover || c.enableDockerDiscover || c.enableStaticDiscover if !discoverdRuntime { return false } diff --git a/pkg/helper/k8smeta/k8s_meta_cache.go b/pkg/helper/k8smeta/k8s_meta_cache.go new file mode 100644 index 0000000000..7fcdda4c6b --- /dev/null +++ b/pkg/helper/k8smeta/k8s_meta_cache.go @@ -0,0 +1,279 @@ +package k8smeta + +import ( + "context" + "fmt" + "time" + + app "k8s.io/api/apps/v1" + batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1" + storage "k8s.io/api/storage/v1" + meta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "github.com/alibaba/ilogtail/pkg/logger" +) + +type k8sMetaCache struct { + metaStore *DeferredDeletionMetaStore + clientset *kubernetes.Clientset + + eventCh chan *K8sMetaEvent + stopCh chan struct{} + + resourceType string + schema *runtime.Scheme +} + +func newK8sMetaCache(stopCh chan struct{}, resourceType string) *k8sMetaCache { + idxRules := getIdxRules(resourceType) + m := &k8sMetaCache{} + m.eventCh = make(chan *K8sMetaEvent, 100) + m.stopCh = stopCh + m.metaStore = NewDeferredDeletionMetaStore(m.eventCh, m.stopCh, 120, cache.MetaNamespaceKeyFunc, idxRules...) 
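(Editor's aside: `newK8sMetaCache` above wires per-resource `IdxFunc` rules into the deferred-deletion meta store, so one object can be indexed under several keys at once. A self-contained sketch of that indexing pattern, with simplified stand-in types rather than the k8smeta package's real structures:)

```go
// Simplified sketch of the IdxFunc pattern: every index rule derives
// zero or more lookup keys from an object, and the store files the same
// object under each key, so lookups by namespace/name, pod IP, or
// container ID all hit the same cache entry.
package main

import "fmt"

type idxFunc func(obj map[string]string) []string

type store struct {
	rules []idxFunc
	index map[string][]map[string]string
}

func newStore(rules ...idxFunc) *store {
	return &store{rules: rules, index: map[string][]map[string]string{}}
}

// add registers the object under every key produced by every rule.
func (s *store) add(obj map[string]string) {
	for _, rule := range s.rules {
		for _, key := range rule(obj) {
			s.index[key] = append(s.index[key], obj)
		}
	}
}

func (s *store) get(key string) []map[string]string { return s.index[key] }

func main() {
	byName := func(o map[string]string) []string { return []string{o["namespace"] + "/" + o["name"]} }
	byPodIP := func(o map[string]string) []string { return []string{o["podIP"]} }
	s := newStore(byName, byPodIP)
	s.add(map[string]string{"namespace": "default", "name": "nginx-1", "podIP": "10.1.0.7"})
	fmt.Println(len(s.get("default/nginx-1")), len(s.get("10.1.0.7"))) // 1 1
}
```

The real store layers deferred deletion on top of this: a deleted object stays queryable for a grace period (120 seconds in the constructor call above) after Kubernetes reports it gone.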
+ m.resourceType = resourceType + m.schema = runtime.NewScheme() + _ = v1.AddToScheme(m.schema) + _ = batch.AddToScheme(m.schema) + _ = app.AddToScheme(m.schema) + _ = networking.AddToScheme(m.schema) + _ = storage.AddToScheme(m.schema) + return m +} + +func (m *k8sMetaCache) init(clientset *kubernetes.Clientset) { + m.clientset = clientset + m.metaStore.Start() + m.watch(m.stopCh) +} + +func (m *k8sMetaCache) Get(key []string) map[string][]*ObjectWrapper { + return m.metaStore.Get(key) +} + +func (m *k8sMetaCache) List() []*ObjectWrapper { + return m.metaStore.List() +} + +func (m *k8sMetaCache) RegisterSendFunc(key string, sendFunc SendFunc, interval int) { + m.metaStore.RegisterSendFunc(key, sendFunc, interval) +} + +func (m *k8sMetaCache) UnRegisterSendFunc(key string) { + m.metaStore.UnRegisterSendFunc(key) +} + +func (m *k8sMetaCache) watch(stopCh <-chan struct{}) { + factory, informer := m.getFactoryInformer() + if informer == nil { + return + } + informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + nowTime := time.Now().Unix() + m.eventCh <- &K8sMetaEvent{ + EventType: EventTypeAdd, + Object: &ObjectWrapper{ + ResourceType: m.resourceType, + Raw: m.preProcess(obj), + FirstObservedTime: nowTime, + LastObservedTime: nowTime, + }, + } + }, + UpdateFunc: func(oldObj interface{}, obj interface{}) { + nowTime := time.Now().Unix() + m.eventCh <- &K8sMetaEvent{ + EventType: EventTypeUpdate, + Object: &ObjectWrapper{ + ResourceType: m.resourceType, + Raw: m.preProcess(obj), + FirstObservedTime: nowTime, + LastObservedTime: nowTime, + }, + } + }, + DeleteFunc: func(obj interface{}) { + m.eventCh <- &K8sMetaEvent{ + EventType: EventTypeDelete, + Object: &ObjectWrapper{ + ResourceType: m.resourceType, + Raw: m.preProcess(obj), + LastObservedTime: time.Now().Unix(), + }, + } + }, + }) + go factory.Start(stopCh) + // wait infinite for first cache sync success + for { + if !cache.WaitForCacheSync(stopCh, informer.HasSynced) { + logger.Error(context.Background(), "K8S_META_CACHE_SYNC_TIMEOUT", "service cache sync timeout") + time.Sleep(1 * time.Second) + } else { + break + } + } +} + +func (m *k8sMetaCache) getFactoryInformer() (informers.SharedInformerFactory, cache.SharedIndexInformer) { + var factory informers.SharedInformerFactory + switch m.resourceType { + case POD: + factory = informers.NewSharedInformerFactory(m.clientset, time.Hour*24) + default: + factory = informers.NewSharedInformerFactory(m.clientset, time.Hour*1) + } + var informer cache.SharedIndexInformer + switch m.resourceType { + case POD: + informer = factory.Core().V1().Pods().Informer() + case SERVICE: + informer = factory.Core().V1().Services().Informer() + case DEPLOYMENT: + informer = factory.Apps().V1().Deployments().Informer() + case REPLICASET: + informer = factory.Apps().V1().ReplicaSets().Informer() + case STATEFULSET: + informer = factory.Apps().V1().StatefulSets().Informer() + case DAEMONSET: + informer = factory.Apps().V1().DaemonSets().Informer() + case CRONJOB: + informer = factory.Batch().V1().CronJobs().Informer() + case JOB: + informer = factory.Batch().V1().Jobs().Informer() + case NODE: + informer = factory.Core().V1().Nodes().Informer() + case NAMESPACE: + informer = factory.Core().V1().Namespaces().Informer() + case CONFIGMAP: + informer = factory.Core().V1().ConfigMaps().Informer() + case SECRET: + informer = factory.Core().V1().Secrets().Informer() + case PERSISTENTVOLUME: + informer = factory.Core().V1().PersistentVolumes().Informer() + case 
PERSISTENTVOLUMECLAIM: + informer = factory.Core().V1().PersistentVolumeClaims().Informer() + case STORAGECLASS: + informer = factory.Storage().V1().StorageClasses().Informer() + case INGRESS: + informer = factory.Networking().V1().Ingresses().Informer() + default: + logger.Error(context.Background(), "ENTITY_PIPELINE_REGISTER_ERROR", "resourceType not support", m.resourceType) + return factory, nil + } + return factory, informer +} + +func getIdxRules(resourceType string) []IdxFunc { + switch resourceType { + case NODE: + return []IdxFunc{generateNodeKey} + case POD: + return []IdxFunc{generateCommonKey, generatePodIPKey, generateContainerIDKey, generateHostIPKey} + default: + return []IdxFunc{generateCommonKey} + } +} + +func (m *k8sMetaCache) preProcess(obj interface{}) interface{} { + switch m.resourceType { + case POD: + return m.preProcessPod(obj) + default: + return m.preProcessCommon(obj) + } +} + +func (m *k8sMetaCache) preProcessCommon(obj interface{}) interface{} { + runtimeObj, ok := obj.(runtime.Object) + if !ok { + logger.Error(context.Background(), "K8S_META_PRE_PROCESS_ERROR", "object is not runtime object", obj) + } + metaObj, err := meta.Accessor(runtimeObj) + if err != nil { + logger.Error(context.Background(), "K8S_META_PRE_PROCESS_ERROR", "object is not meta object", err) + } + // fill empty kind + if runtimeObj.GetObjectKind().GroupVersionKind().Empty() { + gvk, err := apiutil.GVKForObject(runtimeObj, m.schema) + if err != nil { + logger.Error(context.Background(), "K8S_META_PRE_PROCESS_ERROR", "get GVK for object error", err) + return obj + } + runtimeObj.GetObjectKind().SetGroupVersionKind(gvk) + } + // remove unnecessary annotations + if metaObj.GetAnnotations() != nil { + if _, ok := metaObj.GetAnnotations()["kubectl.kubernetes.io/last-applied-configuration"]; ok { + metaObj.GetAnnotations()["kubectl.kubernetes.io/last-applied-configuration"] = "" + } + } + return runtimeObj +} + +func (m *k8sMetaCache) preProcessPod(obj interface{}) interface{} { + m.preProcessCommon(obj) + pod, ok := obj.(*v1.Pod) + if !ok { + return obj + } + pod.ManagedFields = nil + pod.Status.Conditions = nil + pod.Spec.Tolerations = nil + return pod +} + +func generateCommonKey(obj interface{}) ([]string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return []string{}, err + } + return []string{generateNameWithNamespaceKey(meta.GetNamespace(), meta.GetName())}, nil +} + +func generateNodeKey(obj interface{}) ([]string, error) { + node, err := meta.Accessor(obj) + if err != nil { + return []string{}, err + } + return []string{node.GetName()}, nil +} + +func generateNameWithNamespaceKey(namespace, name string) string { + return fmt.Sprintf("%s/%s", namespace, name) +} + +func generatePodIPKey(obj interface{}) ([]string, error) { + pod, ok := obj.(*v1.Pod) + if !ok { + return []string{}, fmt.Errorf("object is not a pod") + } + return []string{pod.Status.PodIP}, nil +} + +func generateContainerIDKey(obj interface{}) ([]string, error) { + pod, ok := obj.(*v1.Pod) + if !ok { + return []string{}, fmt.Errorf("object is not a pod") + } + result := make([]string, len(pod.Status.ContainerStatuses)) + for i, containerStatus := range pod.Status.ContainerStatuses { + result[i] = containerStatus.ContainerID + } + return result, nil +} + +func generateHostIPKey(obj interface{}) ([]string, error) { + pod, ok := obj.(*v1.Pod) + if !ok { + return []string{}, fmt.Errorf("object is not a pod") + } + return []string{pod.Status.HostIP}, nil +} diff --git a/pkg/helper/k8smeta/k8s_meta_cache_pod.go 
b/pkg/helper/k8smeta/k8s_meta_cache_pod.go deleted file mode 100644 index c5f69eb6d8..0000000000 --- a/pkg/helper/k8smeta/k8s_meta_cache_pod.go +++ /dev/null @@ -1,203 +0,0 @@ -package k8smeta - -import ( - "context" - "fmt" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - - "github.com/alibaba/ilogtail/pkg/logger" -) - -type podCache struct { - metaStore *DeferredDeletionMetaStore - serviceMetaStore *DeferredDeletionMetaStore - clientset *kubernetes.Clientset - - eventCh chan *K8sMetaEvent - stopCh chan struct{} - - discardEventCount int - discardStartTime time.Time -} - -func newPodCache(stopCh chan struct{}) *podCache { - idxRules := []IdxFunc{ - generatePodIPKey, - generateContainerIDKey, - generateHostIPKey, - } - m := &podCache{} - m.stopCh = stopCh - m.eventCh = make(chan *K8sMetaEvent, 100) - m.metaStore = NewDeferredDeletionMetaStore(m.eventCh, m.stopCh, 120, cache.MetaNamespaceKeyFunc, idxRules...) - m.serviceMetaStore = NewDeferredDeletionMetaStore(m.eventCh, m.stopCh, 120, cache.MetaNamespaceKeyFunc) - return m -} - -func (m *podCache) init() { - m.metaStore.Start() - m.watch(m.stopCh) -} - -func (m *podCache) Get(key []string) map[string][]*ObjectWrapper { - return m.metaStore.Get(key) -} - -func (m *podCache) RegisterSendFunc(key string, sendFunc SendFunc, interval int) { - m.metaStore.RegisterSendFunc(key, func(kme *K8sMetaEvent) { - sendFunc(kme) - services := m.getPodServiceLink([]*ObjectWrapper{kme.Object}) - for _, service := range services { - sendFunc(&K8sMetaEvent{ - EventType: kme.EventType, - Object: service, - }) - } - }, interval) -} - -func (m *podCache) UnRegisterSendFunc(key string) { - m.metaStore.UnRegisterSendFunc(key) -} - -func generatePodIPKey(obj interface{}) ([]string, error) { - pod, ok := obj.(*v1.Pod) - if !ok { - return []string{}, fmt.Errorf("object is not a pod") - } - return []string{pod.Status.PodIP}, nil -} - -func generateContainerIDKey(obj interface{}) ([]string, error) { - pod, ok := obj.(*v1.Pod) - if !ok { - return []string{}, fmt.Errorf("object is not a pod") - } - result := make([]string, len(pod.Status.ContainerStatuses)) - for i, containerStatus := range pod.Status.ContainerStatuses { - result[i] = containerStatus.ContainerID - } - return result, nil -} - -func generateHostIPKey(obj interface{}) ([]string, error) { - pod, ok := obj.(*v1.Pod) - if !ok { - return []string{}, fmt.Errorf("object is not a pod") - } - return []string{pod.Status.HostIP}, nil -} - -func (m *podCache) watch(stopCh <-chan struct{}) { - factory := informers.NewSharedInformerFactory(m.clientset, time.Hour*24) - informer := factory.Core().V1().Pods().Informer() - informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - nowTime := time.Now().Unix() - m.saveWithTimeout(&K8sMetaEvent{ - EventType: EventTypeAdd, - Object: &ObjectWrapper{ - ResourceType: POD, - Raw: obj, - FirstObservedTime: nowTime, - LastObservedTime: nowTime, - }, - }) - }, - UpdateFunc: func(oldObj interface{}, obj interface{}) { - nowTime := time.Now().Unix() - m.saveWithTimeout(&K8sMetaEvent{ - EventType: EventTypeUpdate, - Object: &ObjectWrapper{ - ResourceType: POD, - Raw: obj, - FirstObservedTime: nowTime, - LastObservedTime: nowTime, - }, - }) - }, - DeleteFunc: func(obj interface{}) { - m.saveWithTimeout(&K8sMetaEvent{ - EventType: EventTypeDelete, - Object: &ObjectWrapper{ - ResourceType: POD, - Raw: obj, - LastObservedTime: 
time.Now().Unix(), - }, - }) - }, - }) - go factory.Start(stopCh) - // wait infinite for first cache sync success - for { - if !cache.WaitForCacheSync(stopCh, informer.HasSynced) { - logger.Error(context.Background(), "K8S_META_CACHE_SYNC_TIMEOUT", "pod cache sync timeout") - time.Sleep(1 * time.Second) - } else { - break - } - } -} - -func (m *podCache) saveWithTimeout(event *K8sMetaEvent) { - select { - case m.eventCh <- event: - case <-time.After(1 * time.Second): - m.discardEventCount++ - if m.discardEventCount == 10 { - logger.Warning(context.Background(), "K8S_META_CACHE_DISCARD_EVENT", "discard event count", m.discardEventCount, "from", m.discardStartTime.String(), "to", time.Now().String()) - m.discardEventCount = 0 - m.discardStartTime = time.Now() - } - } -} - -func (m *podCache) getPodServiceLink(podList []*ObjectWrapper) []*ObjectWrapper { - serviceList := m.serviceMetaStore.List() - results := make([]*ObjectWrapper, 0) - matchers := make(map[string]labelMatchers) - for _, data := range serviceList { - service, ok := data.Raw.(*v1.Service) - if !ok { - continue - } - - _, ok = matchers[service.Namespace] - lm := newLabelMatcher(data.Raw, labels.SelectorFromSet(service.Spec.Selector)) - if !ok { - matchers[service.Namespace] = []*labelMatcher{lm} - } else { - matchers[service.Namespace] = append(matchers[service.Namespace], lm) - } - } - - for _, data := range podList { - pod, ok := data.Raw.(*v1.Pod) - if !ok { - continue - } - nsSelectors, ok := matchers[pod.Namespace] - if !ok { - continue - } - set := labels.Set(pod.Labels) - for _, s := range nsSelectors { - if !s.selector.Empty() && s.selector.Matches(set) { - results = append(results, &ObjectWrapper{ - ResourceType: POD_SERVICE, - Raw: &PodService{ - Pod: pod, - Service: s.obj.(*v1.Service), - }, - }) - } - } - } - return results -} diff --git a/pkg/helper/k8smeta/k8s_meta_cache_service.go b/pkg/helper/k8smeta/k8s_meta_cache_service.go deleted file mode 100644 index 5c284a8a4f..0000000000 --- a/pkg/helper/k8smeta/k8s_meta_cache_service.go +++ /dev/null @@ -1,109 +0,0 @@ -package k8smeta - -import ( - "context" - "fmt" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - - "github.com/alibaba/ilogtail/pkg/logger" -) - -type serviceCache struct { - metaStore *DeferredDeletionMetaStore - clientset *kubernetes.Clientset - - eventCh chan *K8sMetaEvent - stopCh chan struct{} -} - -func newServiceCache(stopCh chan struct{}) *serviceCache { - idxRules := []IdxFunc{ - generateServiceNameKey, - } - m := &serviceCache{} - m.eventCh = make(chan *K8sMetaEvent, 100) - m.stopCh = stopCh - m.metaStore = NewDeferredDeletionMetaStore(m.eventCh, m.stopCh, 120, cache.MetaNamespaceKeyFunc, idxRules...) 
- return m -} - -func (m *serviceCache) init() { - m.metaStore.Start() - m.watch(m.stopCh) -} - -func (m *serviceCache) Get(key []string) map[string][]*ObjectWrapper { - return m.metaStore.Get(key) -} - -func (m *serviceCache) RegisterSendFunc(key string, sendFunc SendFunc, interval int) { - m.metaStore.RegisterSendFunc(key, sendFunc, interval) -} - -func (m *serviceCache) UnRegisterSendFunc(key string) { - m.metaStore.UnRegisterSendFunc(key) -} - -func generateServiceNameKey(obj interface{}) ([]string, error) { - service, ok := obj.(*v1.Service) - if !ok { - return []string{}, fmt.Errorf("object is not a service") - } - return []string{service.Name}, nil -} - -func (m *serviceCache) watch(stopCh <-chan struct{}) { - factory := informers.NewSharedInformerFactory(m.clientset, time.Hour*1) - informer := factory.Core().V1().Services().Informer() - informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - nowTime := time.Now().Unix() - m.eventCh <- &K8sMetaEvent{ - EventType: EventTypeAdd, - Object: &ObjectWrapper{ - ResourceType: SERVICE, - Raw: obj, - FirstObservedTime: nowTime, - LastObservedTime: nowTime, - }, - } - }, - UpdateFunc: func(oldObj interface{}, obj interface{}) { - nowTime := time.Now().Unix() - m.eventCh <- &K8sMetaEvent{ - EventType: EventTypeUpdate, - Object: &ObjectWrapper{ - ResourceType: SERVICE, - Raw: obj, - FirstObservedTime: nowTime, - LastObservedTime: nowTime, - }, - } - }, - DeleteFunc: func(obj interface{}) { - m.eventCh <- &K8sMetaEvent{ - EventType: EventTypeDelete, - Object: &ObjectWrapper{ - ResourceType: SERVICE, - Raw: obj, - LastObservedTime: time.Now().Unix(), - }, - } - }, - }) - go factory.Start(stopCh) - // wait infinite for first cache sync success - for { - if !cache.WaitForCacheSync(stopCh, informer.HasSynced) { - logger.Error(context.Background(), "K8S_META_CACHE_SYNC_TIMEOUT", "service cache sync timeout") - time.Sleep(1 * time.Second) - } else { - break - } - } -} diff --git a/pkg/helper/k8smeta/k8s_meta_const.go b/pkg/helper/k8smeta/k8s_meta_const.go index 9ecaf21670..e9e9b791ab 100644 --- a/pkg/helper/k8smeta/k8s_meta_const.go +++ b/pkg/helper/k8smeta/k8s_meta_const.go @@ -1,18 +1,128 @@ package k8smeta -import v1 "k8s.io/api/core/v1" +import ( + app "k8s.io/api/apps/v1" + batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" +) const ( // entity type - POD = "pod" - SERVICE = "service" + POD = "pod" + SERVICE = "service" + DEPLOYMENT = "deployment" + REPLICASET = "replicaset" + STATEFULSET = "statefulset" + DAEMONSET = "daemonset" + CRONJOB = "cronjob" + JOB = "job" + NODE = "node" + NAMESPACE = "namespace" + CONFIGMAP = "configmap" + SECRET = "secret" + PERSISTENTVOLUME = "persistentvolume" + PERSISTENTVOLUMECLAIM = "persistentvolumeclaim" + STORAGECLASS = "storageclass" + INGRESS = "ingress" + CONTAINER = "container" // entity link type //revive:disable:var-naming - LINK_SPLIT_CHARACTER = "_" - POD_SERVICE = "pod_service" + LINK_SPLIT_CHARACTER = "->" + POD_NODE = "pod->node" + REPLICASET_DEPLOYMENT = "replicaset->deployment" + POD_REPLICASET = "pod->replicaset" + POD_STATEFULSET = "pod->statefulset" + POD_DAEMONSET = "pod->daemonset" + JOB_CRONJOB = "job->cronjob" + POD_JOB = "pod->job" + POD_PERSISENTVOLUMECLAIN = "pod->persistentvolumeclaim" + POD_CONFIGMAP = "pod->configmap" + POD_SECRET = "pod->secret" + POD_SERVICE = "pod->service" + POD_CONTAINER = "pod->container" + POD_PROCESS = "pod->process" //revive:enable:var-naming ) +var AllResources = []string{ + POD, + SERVICE, + DEPLOYMENT, + REPLICASET, 
+ STATEFULSET, + DAEMONSET, + CRONJOB, + JOB, + NODE, + NAMESPACE, + CONFIGMAP, + SECRET, + PERSISTENTVOLUME, + PERSISTENTVOLUMECLAIM, + STORAGECLASS, + INGRESS, +} + +type NodePod struct { + Node *v1.Node + Pod *v1.Pod +} + +type ReplicaSetDeployment struct { + Deployment *app.Deployment + ReplicaSet *app.ReplicaSet +} + +type PodReplicaSet struct { + ReplicaSet *app.ReplicaSet + Pod *v1.Pod +} + +type PodStatefulSet struct { + StatefulSet *app.StatefulSet + Pod *v1.Pod +} + +type PodDaemonSet struct { + DaemonSet *app.DaemonSet + Pod *v1.Pod +} + +type JobCronJob struct { + CronJob *batch.CronJob + Job *batch.Job +} + +type PodJob struct { + Job *batch.Job + Pod *v1.Pod +} + +type PodPersistentVolumeClaim struct { + Pod *v1.Pod + PersistentVolumeClaim *v1.PersistentVolumeClaim +} + +type PodConfigMap struct { + Pod *v1.Pod + ConfigMap *v1.ConfigMap +} + +type PodSecret struct { + Pod *v1.Pod + Secret *v1.Secret +} + +type PodService struct { + Service *v1.Service + Pod *v1.Pod +} + +type PodContainer struct { + Pod *v1.Pod + Container *v1.Container +} + const ( EventTypeAdd = "add" EventTypeUpdate = "update" @@ -31,8 +141,3 @@ type PodMetadata struct { Images map[string]string `json:"images"` IsDeleted bool `json:"-"` } - -type PodService struct { - Pod *v1.Pod - Service *v1.Service -} diff --git a/pkg/helper/k8smeta/k8s_meta_deferred_deletion_cache.go b/pkg/helper/k8smeta/k8s_meta_deferred_deletion_meta_store.go similarity index 95% rename from pkg/helper/k8smeta/k8s_meta_deferred_deletion_cache.go rename to pkg/helper/k8smeta/k8s_meta_deferred_deletion_meta_store.go index c73514e5a8..eaf4d853f9 100644 --- a/pkg/helper/k8smeta/k8s_meta_deferred_deletion_cache.go +++ b/pkg/helper/k8smeta/k8s_meta_deferred_deletion_meta_store.go @@ -175,7 +175,7 @@ func (m *DeferredDeletionMetaStore) handleAddEvent(event *K8sMetaEvent) { } m.lock.Unlock() m.sendFuncs.Range(func(key, value interface{}) bool { - value.(*SendFuncWithStopCh).SendFunc(event) + value.(*SendFuncWithStopCh).SendFunc([]*K8sMetaEvent{event}) return true }) } @@ -200,7 +200,7 @@ func (m *DeferredDeletionMetaStore) handleUpdateEvent(event *K8sMetaEvent) { } m.lock.Unlock() m.sendFuncs.Range(func(key, value interface{}) bool { - value.(*SendFuncWithStopCh).SendFunc(event) + value.(*SendFuncWithStopCh).SendFunc([]*K8sMetaEvent{event}) return true }) } @@ -218,7 +218,7 @@ func (m *DeferredDeletionMetaStore) handleDeleteEvent(event *K8sMetaEvent) { } m.lock.Unlock() m.sendFuncs.Range(func(key, value interface{}) bool { - value.(*SendFuncWithStopCh).SendFunc(event) + value.(*SendFuncWithStopCh).SendFunc([]*K8sMetaEvent{event}) return true }) go func() { @@ -279,15 +279,17 @@ func (m *DeferredDeletionMetaStore) handleTimerEvent(event *K8sMetaEvent) { timerEvent := event.Object.Raw.(*TimerEvent) if f, ok := m.sendFuncs.Load(timerEvent.ConfigName); ok { sendFuncWithStopCh := f.(*SendFuncWithStopCh) + allItems := make([]*K8sMetaEvent, 0) for _, obj := range m.Items { if !obj.Deleted { obj.LastObservedTime = time.Now().Unix() - sendFuncWithStopCh.SendFunc(&K8sMetaEvent{ + allItems = append(allItems, &K8sMetaEvent{ EventType: EventTypeUpdate, Object: obj, }) } } + sendFuncWithStopCh.SendFunc(allItems) } } diff --git a/pkg/helper/k8smeta/k8s_meta_deferred_deletion_cache_test.go b/pkg/helper/k8smeta/k8s_meta_deferred_deletion_meta_store_test.go similarity index 95% rename from pkg/helper/k8smeta/k8s_meta_deferred_deletion_cache_test.go rename to pkg/helper/k8smeta/k8s_meta_deferred_deletion_meta_store_test.go index 6041b2dcea..64f210599b 100644 
--- a/pkg/helper/k8smeta/k8s_meta_deferred_deletion_cache_test.go +++ b/pkg/helper/k8smeta/k8s_meta_deferred_deletion_meta_store_test.go @@ -57,7 +57,7 @@ func TestRegisterWaitManagerReady(t *testing.T) { gracePeriod := 1 cache := NewDeferredDeletionMetaStore(eventCh, stopCh, int64(gracePeriod), cache.MetaNamespaceKeyFunc) manager := GetMetaManagerInstance() - cache.RegisterSendFunc("test", func(kme *K8sMetaEvent) {}, 100) + cache.RegisterSendFunc("test", func(kme []*K8sMetaEvent) {}, 100) select { case <-cache.eventCh: t.Error("should not receive event before manager is ready") @@ -88,7 +88,7 @@ func TestTimerSend(t *testing.T) { } cache.Start() resultCh := make(chan struct{}) - cache.RegisterSendFunc("test", func(kme *K8sMetaEvent) { + cache.RegisterSendFunc("test", func(kmes []*K8sMetaEvent) { resultCh <- struct{}{} }, 1) go func() { diff --git a/pkg/helper/k8smeta/k8s_meta_http_server.go b/pkg/helper/k8smeta/k8s_meta_http_server.go index 8f0582d61d..2aace0ce67 100644 --- a/pkg/helper/k8smeta/k8s_meta_http_server.go +++ b/pkg/helper/k8smeta/k8s_meta_http_server.go @@ -9,6 +9,7 @@ import ( "strings" "time" + app "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "github.com/alibaba/ilogtail/pkg/logger" @@ -77,9 +78,9 @@ func (m *metadataHandler) handlePodMetaByUniqueID(w http.ResponseWriter, r *http // Get the metadata metadata := make(map[string]*PodMetadata) - objs := m.metaManager.PodCache.Get(rBody.Keys) + objs := m.metaManager.cacheMap[POD].Get(rBody.Keys) for key, obj := range objs { - podMetadata := convertObj2PodMetadata(obj) + podMetadata := m.convertObj2PodMetadata(obj) if len(podMetadata) > 1 { logger.Warning(context.Background(), "Multiple pods found for unique ID", key) } @@ -115,9 +116,9 @@ func (m *metadataHandler) handlePodMetaByHostIP(w http.ResponseWriter, r *http.R // Get the metadata metadata := make(map[string]*PodMetadata) - objs := m.metaManager.PodCache.Get(rBody.Keys) + objs := m.metaManager.cacheMap[POD].Get(rBody.Keys) for _, obj := range objs { - podMetadata := convertObj2PodMetadata(obj) + podMetadata := m.convertObj2PodMetadata(obj) for i, meta := range podMetadata { pod := obj[i].Raw.(*v1.Pod) metadata[pod.Status.PodIP] = meta @@ -139,7 +140,7 @@ func (m *metadataHandler) handlePodMetaByHostIP(w http.ResponseWriter, r *http.R } } -func convertObj2PodMetadata(objs []*ObjectWrapper) []*PodMetadata { +func (m *metadataHandler) convertObj2PodMetadata(objs []*ObjectWrapper) []*PodMetadata { result := make([]*PodMetadata, 0) for _, obj := range objs { pod := obj.Raw.(*v1.Pod) @@ -167,6 +168,17 @@ func convertObj2PodMetadata(objs []*ObjectWrapper) []*PodMetadata { } else { podMetadata.WorkloadName = pod.GetOwnerReferences()[0].Name podMetadata.WorkloadKind = strings.ToLower(pod.GetOwnerReferences()[0].Kind) + if podMetadata.WorkloadKind == "replicaset" { + // replicaset -> deployment + replicasets := m.metaManager.cacheMap[REPLICASET].Get([]string{podMetadata.WorkloadName}) + for _, replicaset := range replicasets[podMetadata.WorkloadName] { + if len(replicaset.Raw.(*app.ReplicaSet).OwnerReferences) > 0 { + podMetadata.WorkloadName = replicaset.Raw.(*app.ReplicaSet).OwnerReferences[0].Name + podMetadata.WorkloadKind = strings.ToLower(replicaset.Raw.(*app.ReplicaSet).OwnerReferences[0].Kind) + break + } + } + } } result = append(result, podMetadata) } diff --git a/pkg/helper/k8smeta/k8s_meta_link.go b/pkg/helper/k8smeta/k8s_meta_link.go new file mode 100644 index 0000000000..808d4df827 --- /dev/null +++ b/pkg/helper/k8smeta/k8s_meta_link.go @@ -0,0 +1,399 @@ +package 
k8smeta + +import ( + "strings" + + app "k8s.io/api/apps/v1" + batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +type LinkGenerator struct { + metaCache map[string]MetaCache +} + +func NewK8sMetaLinkGenerator(metaCache map[string]MetaCache) *LinkGenerator { + return &LinkGenerator{ + metaCache: metaCache, + } +} + +func (g *LinkGenerator) GenerateLinks(events []*K8sMetaEvent, linkType string) []*K8sMetaEvent { + if len(events) == 0 { + return nil + } + resourceType := events[0].Object.ResourceType + // only generate link from the src entity + if !strings.HasPrefix(linkType, resourceType) { + return nil + } + switch linkType { + case POD_NODE: + return g.getPodNodeLink(events) + case REPLICASET_DEPLOYMENT: + return g.getReplicaSetDeploymentLink(events) + case POD_REPLICASET, POD_STATEFULSET, POD_DAEMONSET, POD_JOB: + return g.getParentPodLink(events) + case JOB_CRONJOB: + return g.getJobCronJobLink(events) + case POD_PERSISENTVOLUMECLAIN: + return g.getPodPVCLink(events) + case POD_CONFIGMAP: + return g.getPodConfigMapLink(events) + case POD_SECRET: + return g.getPodSecretLink(events) + case POD_SERVICE: + return g.getPodServiceLink(events) + case POD_CONTAINER: + return g.getPodContainerLink(events) + default: + return nil + } +} + +func (g *LinkGenerator) getPodNodeLink(events []*K8sMetaEvent) []*K8sMetaEvent { + nodeCache := g.metaCache[NODE] + result := make([]*K8sMetaEvent, 0) + for _, event := range events { + pod, ok := event.Object.Raw.(*v1.Pod) + if !ok { + continue + } + nodes := nodeCache.Get([]string{pod.Spec.NodeName}) + for _, node := range nodes { + for _, n := range node { + result = append(result, &K8sMetaEvent{ + EventType: event.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_NODE, + Raw: &NodePod{ + Node: n.Raw.(*v1.Node), + Pod: pod, + }, + FirstObservedTime: event.Object.FirstObservedTime, + LastObservedTime: event.Object.LastObservedTime, + }, + }) + } + } + } + return result +} + +func (g *LinkGenerator) getReplicaSetDeploymentLink(events []*K8sMetaEvent) []*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, event := range events { + replicaset, ok := event.Object.Raw.(*app.ReplicaSet) + if !ok || len(replicaset.OwnerReferences) == 0 { + continue + } + deploymentName := replicaset.OwnerReferences[0].Name + deployments := g.metaCache[DEPLOYMENT].Get([]string{generateNameWithNamespaceKey(replicaset.Namespace, deploymentName)}) + for _, deployment := range deployments { + for _, d := range deployment { + result = append(result, &K8sMetaEvent{ + EventType: event.EventType, + Object: &ObjectWrapper{ + ResourceType: REPLICASET_DEPLOYMENT, + Raw: &ReplicaSetDeployment{ + Deployment: d.Raw.(*app.Deployment), + ReplicaSet: replicaset, + }, + FirstObservedTime: event.Object.FirstObservedTime, + LastObservedTime: event.Object.LastObservedTime, + }, + }) + } + } + } + return result +} + +func (g *LinkGenerator) getParentPodLink(podList []*K8sMetaEvent) []*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, data := range podList { + pod, ok := data.Object.Raw.(*v1.Pod) + if !ok || len(pod.OwnerReferences) == 0 { + continue + } + parentName := pod.OwnerReferences[0].Name + switch pod.OwnerReferences[0].Kind { + case "ReplicaSet": + rsList := g.metaCache[REPLICASET].Get([]string{generateNameWithNamespaceKey(pod.Namespace, parentName)}) + for _, rs := range rsList { + for _, r := range rs { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: 
POD_REPLICASET, + Raw: &PodReplicaSet{ + ReplicaSet: r.Raw.(*app.ReplicaSet), + Pod: pod, + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + case "StatefulSet": + ssList := g.metaCache[STATEFULSET].Get([]string{generateNameWithNamespaceKey(pod.Namespace, parentName)}) + for _, ss := range ssList { + for _, s := range ss { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_STATEFULSET, + Raw: &PodStatefulSet{ + StatefulSet: s.Raw.(*app.StatefulSet), + Pod: pod, + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + case "DaemonSet": + dsList := g.metaCache[DAEMONSET].Get([]string{generateNameWithNamespaceKey(pod.Namespace, parentName)}) + for _, ds := range dsList { + for _, d := range ds { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_DAEMONSET, + Raw: &PodDaemonSet{ + DaemonSet: d.Raw.(*app.DaemonSet), + Pod: pod, + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + case "Job": + jobList := g.metaCache[JOB].Get([]string{generateNameWithNamespaceKey(pod.Namespace, parentName)}) + for _, job := range jobList { + for _, j := range job { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_JOB, + Raw: &PodJob{ + Job: j.Raw.(*batch.Job), + Pod: pod, + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + } + } + return result +} + +func (g *LinkGenerator) getJobCronJobLink(jobList []*K8sMetaEvent) []*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, data := range jobList { + job, ok := data.Object.Raw.(*batch.Job) + if !ok || len(job.OwnerReferences) == 0 { + continue + } + cronJobName := job.OwnerReferences[0].Name + cronJobList := g.metaCache[CRONJOB].Get([]string{generateNameWithNamespaceKey(job.Namespace, cronJobName)}) + for _, cj := range cronJobList { + for _, c := range cj { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: JOB_CRONJOB, + Raw: &JobCronJob{ + CronJob: c.Raw.(*batch.CronJob), + Job: job, + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + } + return result +} + +func (g *LinkGenerator) getPodPVCLink(podList []*K8sMetaEvent) []*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, data := range podList { + pod, ok := data.Object.Raw.(*v1.Pod) + if !ok { + continue + } + for _, volume := range pod.Spec.Volumes { + if volume.PersistentVolumeClaim != nil { + pvcName := volume.PersistentVolumeClaim.ClaimName + pvcList := g.metaCache[PERSISTENTVOLUMECLAIM].Get([]string{generateNameWithNamespaceKey(pod.Namespace, pvcName)}) + for _, pvc := range pvcList { + for _, p := range pvc { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_PERSISENTVOLUMECLAIN, + Raw: &PodPersistentVolumeClaim{ + Pod: pod, + PersistentVolumeClaim: p.Raw.(*v1.PersistentVolumeClaim), + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + } + } + } + return result +} + +func (g *LinkGenerator) getPodConfigMapLink(podList []*K8sMetaEvent) 
[]*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, data := range podList { + pod, ok := data.Object.Raw.(*v1.Pod) + if !ok { + continue + } + for _, volume := range pod.Spec.Volumes { + if volume.ConfigMap != nil { + cmName := volume.ConfigMap.Name + cmList := g.metaCache[CONFIGMAP].Get([]string{generateNameWithNamespaceKey(pod.Namespace, cmName)}) + for _, cm := range cmList { + for _, c := range cm { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_CONFIGMAP, + Raw: &PodConfigMap{ + Pod: pod, + ConfigMap: c.Raw.(*v1.ConfigMap), + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + } + } + } + return result +} + +func (g *LinkGenerator) getPodSecretLink(podList []*K8sMetaEvent) []*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, data := range podList { + pod, ok := data.Object.Raw.(*v1.Pod) + if !ok { + continue + } + for _, volume := range pod.Spec.Volumes { + if volume.Secret != nil { + secretName := volume.Secret.SecretName + secretList := g.metaCache[SECRET].Get([]string{generateNameWithNamespaceKey(pod.Namespace, secretName)}) + for _, secret := range secretList { + for _, s := range secret { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_SECRET, + Raw: &PodSecret{ + Pod: pod, + Secret: s.Raw.(*v1.Secret), + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + } + } + } + return result +} + +func (g *LinkGenerator) getPodServiceLink(podList []*K8sMetaEvent) []*K8sMetaEvent { + serviceList := g.metaCache[SERVICE].List() + result := make([]*K8sMetaEvent, 0) + matchers := make(map[string]labelMatchers) + for _, data := range serviceList { + service, ok := data.Raw.(*v1.Service) + if !ok { + continue + } + + _, ok = matchers[service.Namespace] + lm := newLabelMatcher(data.Raw, labels.SelectorFromSet(service.Spec.Selector)) + if !ok { + matchers[service.Namespace] = []*labelMatcher{lm} + } else { + matchers[service.Namespace] = append(matchers[service.Namespace], lm) + } + } + + for _, data := range podList { + pod, ok := data.Object.Raw.(*v1.Pod) + if !ok { + continue + } + nsSelectors, ok := matchers[pod.Namespace] + if !ok { + continue + } + set := labels.Set(pod.Labels) + for _, s := range nsSelectors { + if !s.selector.Empty() && s.selector.Matches(set) { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_SERVICE, + Raw: &PodService{ + Pod: pod, + Service: s.obj.(*v1.Service), + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + } + return result +} + +func (g *LinkGenerator) getPodContainerLink(podList []*K8sMetaEvent) []*K8sMetaEvent { + result := make([]*K8sMetaEvent, 0) + for _, data := range podList { + pod, ok := data.Object.Raw.(*v1.Pod) + if !ok { + continue + } + for i := range pod.Spec.Containers { + result = append(result, &K8sMetaEvent{ + EventType: data.EventType, + Object: &ObjectWrapper{ + ResourceType: POD_CONTAINER, + Raw: &PodContainer{ + Pod: pod, + Container: &pod.Spec.Containers[i], + }, + FirstObservedTime: data.Object.FirstObservedTime, + LastObservedTime: data.Object.LastObservedTime, + }, + }) + } + } + return result +} diff --git a/pkg/helper/k8smeta/k8s_meta_cache_pod_test.go b/pkg/helper/k8smeta/k8s_meta_link_test.go 
similarity index 55% rename from pkg/helper/k8smeta/k8s_meta_cache_pod_test.go rename to pkg/helper/k8smeta/k8s_meta_link_test.go index 06b697f279..45c3d7ee3d 100644 --- a/pkg/helper/k8smeta/k8s_meta_cache_pod_test.go +++ b/pkg/helper/k8smeta/k8s_meta_link_test.go @@ -9,11 +9,12 @@ import ( ) func TestGetPodServiceLink(t *testing.T) { - podCache := newPodCache(make(chan struct{})) - podCache.serviceMetaStore.Items["default/test"] = &ObjectWrapper{ + podCache := newK8sMetaCache(make(chan struct{}), POD) + serviceCache := newK8sMetaCache(make(chan struct{}), SERVICE) + serviceCache.metaStore.Items["default/service1"] = &ObjectWrapper{ Raw: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test", + Name: "service1", Namespace: "default", }, Spec: corev1.ServiceSpec{ @@ -23,10 +24,10 @@ func TestGetPodServiceLink(t *testing.T) { }, }, } - podCache.serviceMetaStore.Items["default/test2"] = &ObjectWrapper{ + serviceCache.metaStore.Items["default/service2"] = &ObjectWrapper{ Raw: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test2", + Name: "service2", Namespace: "default", }, Spec: corev1.ServiceSpec{ @@ -36,10 +37,10 @@ func TestGetPodServiceLink(t *testing.T) { }, }, } - podCache.metaStore.Items["default/test"] = &ObjectWrapper{ + podCache.metaStore.Items["default/pod1"] = &ObjectWrapper{ Raw: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "test", + Name: "pod1", Namespace: "default", Labels: map[string]string{ "app": "test", @@ -56,10 +57,10 @@ func TestGetPodServiceLink(t *testing.T) { }, }, } - podCache.metaStore.Items["default/test2"] = &ObjectWrapper{ + podCache.metaStore.Items["default/pod2"] = &ObjectWrapper{ Raw: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "test2", + Name: "pod2", Namespace: "default", Labels: map[string]string{ "app": "test2", @@ -76,12 +77,22 @@ func TestGetPodServiceLink(t *testing.T) { }, }, } - podList := []*ObjectWrapper{ - podCache.metaStore.Items["default/test"], - podCache.metaStore.Items["default/test2"], + linkGenerator := NewK8sMetaLinkGenerator(map[string]MetaCache{ + POD: podCache, + SERVICE: serviceCache, + }) + podList := []*K8sMetaEvent{ + { + EventType: "update", + Object: podCache.metaStore.Items["default/pod1"], + }, + { + EventType: "update", + Object: podCache.metaStore.Items["default/pod2"], + }, } - results := podCache.getPodServiceLink(podList) + results := linkGenerator.getPodServiceLink(podList) assert.Equal(t, 2, len(results)) - assert.Equal(t, "test", results[0].Raw.(*PodService).Service.Name) - assert.Equal(t, "test2", results[1].Raw.(*PodService).Service.Name) + assert.Equal(t, "service1", results[0].Object.Raw.(*PodService).Service.Name) + assert.Equal(t, "service2", results[1].Object.Raw.(*PodService).Service.Name) } diff --git a/pkg/helper/k8smeta/k8s_meta_manager.go b/pkg/helper/k8smeta/k8s_meta_manager.go index f12a5e4a66..dc4f3211e5 100644 --- a/pkg/helper/k8smeta/k8s_meta_manager.go +++ b/pkg/helper/k8smeta/k8s_meta_manager.go @@ -2,15 +2,20 @@ package k8smeta import ( "context" + "fmt" + "strings" "sync" "sync/atomic" + "time" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" controllerConfig "sigs.k8s.io/controller-runtime/pkg/client/config" + "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" + "github.com/alibaba/ilogtail/pkg/pipeline" ) var metaManager *MetaManager @@ -19,9 +24,10 @@ var onceManager sync.Once type MetaCache interface { Get(key []string) map[string][]*ObjectWrapper + List() []*ObjectWrapper RegisterSendFunc(key 
string, sendFunc SendFunc, interval int)
 	UnRegisterSendFunc(key string)
-	init()
+	init(*kubernetes.Clientset)
 	watch(stopCh <-chan struct{})
 }
@@ -37,8 +43,12 @@
 type MetaManager struct {
 	eventCh chan *K8sMetaEvent
 	ready   atomic.Bool
-	PodCache     *podCache
-	ServiceCache *serviceCache
+	cacheMap         map[string]MetaCache
+	linkGenerator    *LinkGenerator
+	linkRegisterMap  map[string][]string
+	linkRegisterLock sync.RWMutex
+
+	metricContext pipeline.Context
 }
 
 func GetMetaManagerInstance() *MetaManager {
@@ -47,8 +57,12 @@
 			stopCh:  make(chan struct{}),
 			eventCh: make(chan *K8sMetaEvent, 1000),
 		}
-		metaManager.PodCache = newPodCache(metaManager.stopCh)
-		metaManager.ServiceCache = newServiceCache(metaManager.stopCh)
+		metaManager.cacheMap = make(map[string]MetaCache)
+		for _, resource := range AllResources {
+			metaManager.cacheMap[resource] = newK8sMetaCache(metaManager.stopCh, resource)
+		}
+		metaManager.linkGenerator = NewK8sMetaLinkGenerator(metaManager.cacheMap)
+		metaManager.linkRegisterMap = make(map[string][]string)
 	})
 	return metaManager
 }
@@ -70,15 +84,15 @@
 		return err
 	}
 	m.clientset = clientset
+	m.metricContext = &helper.LocalContext{}
 	go func() {
-		m.ServiceCache.clientset = m.clientset
-		m.ServiceCache.init()
-		m.PodCache.clientset = m.clientset
-		m.PodCache.serviceMetaStore = m.ServiceCache.metaStore
-		m.PodCache.init()
+		startTime := time.Now()
+		for _, cache := range m.cacheMap {
+			cache.init(clientset)
+		}
 		m.ready.Store(true)
-		logger.Info(context.Background(), "init k8s meta manager", "success")
+		logger.Info(context.Background(), "init k8s meta manager", "success", "latency (ms)", fmt.Sprintf("%d", time.Since(startTime).Milliseconds()))
 	}()
 	return nil
 }
@@ -93,28 +107,53 @@
 }
 
 func (m *MetaManager) RegisterSendFunc(configName string, resourceType string, sendFunc SendFunc, interval int) {
-	switch resourceType {
-	case POD:
-		m.PodCache.RegisterSendFunc(configName, sendFunc, interval)
-	case SERVICE:
-		m.ServiceCache.RegisterSendFunc(configName, sendFunc, interval)
-	default:
+	if cache, ok := m.cacheMap[resourceType]; ok {
+		cache.RegisterSendFunc(configName, func(events []*K8sMetaEvent) {
+			sendFunc(events)
+			linkTypeList := make([]string, 0)
+			m.linkRegisterLock.RLock()
+			if m.linkRegisterMap[configName] != nil {
+				linkTypeList = append(linkTypeList, m.linkRegisterMap[configName]...)
+ } + m.linkRegisterLock.RUnlock() + for _, linkType := range linkTypeList { + linkEvents := m.linkGenerator.GenerateLinks(events, linkType) + if linkEvents != nil { + sendFunc(linkEvents) + } + } + }, interval) + return + } + if !isEntity(resourceType) { + m.linkRegisterLock.Lock() + if _, ok := m.linkRegisterMap[configName]; !ok { + m.linkRegisterMap[configName] = make([]string, 0) + } + m.linkRegisterMap[configName] = append(m.linkRegisterMap[configName], resourceType) + m.linkRegisterLock.Unlock() + } else { logger.Error(context.Background(), "ENTITY_PIPELINE_REGISTER_ERROR", "resourceType not support", resourceType) } } func (m *MetaManager) UnRegisterSendFunc(configName string, resourceType string) { - switch resourceType { - case POD: - m.PodCache.UnRegisterSendFunc(configName) - case SERVICE: - m.ServiceCache.UnRegisterSendFunc(configName) - default: + if cache, ok := m.cacheMap[resourceType]; ok { + cache.UnRegisterSendFunc(configName) + } else { logger.Error(context.Background(), "ENTITY_PIPELINE_UNREGISTER_ERROR", "resourceType not support", resourceType) } } +func (m *MetaManager) GetMetricContext() pipeline.Context { + return m.metricContext +} + func (m *MetaManager) runServer() { metadataHandler := newMetadataHandler() go metadataHandler.K8sServerRun(m.stopCh) } + +func isEntity(resourceType string) bool { + return !strings.Contains(resourceType, LINK_SPLIT_CHARACTER) +} diff --git a/pkg/helper/k8smeta/k8s_meta_store_interface.go b/pkg/helper/k8smeta/k8s_meta_store_interface.go index 7e84da7ff9..d7e9c0f33d 100644 --- a/pkg/helper/k8smeta/k8s_meta_store_interface.go +++ b/pkg/helper/k8smeta/k8s_meta_store_interface.go @@ -24,7 +24,7 @@ type ObjectWrapper struct { type IdxFunc func(obj interface{}) ([]string, error) -type SendFunc func(event *K8sMetaEvent) +type SendFunc func(events []*K8sMetaEvent) func panicRecover() { if err := recover(); err != nil { diff --git a/plugin_main/flags/flags.go b/plugin_main/flags/flags.go deleted file mode 100644 index 2ad9f1b95f..0000000000 --- a/plugin_main/flags/flags.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2021 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - "sync" - - "github.com/alibaba/ilogtail/pkg/logger" - "github.com/alibaba/ilogtail/pkg/util" -) - -const ( - defaultGlobalConfig = `{"InputIntervalMs":5000,"AggregatIntervalMs":30,"FlushIntervalMs":30,"DefaultLogQueueSize":11,"DefaultLogGroupQueueSize":12}` - defaultPluginConfig = `{"inputs":[{"type":"metric_mock","detail":{"Tags":{"tag1":"aaaa","tag2":"bbb"},"Fields":{"content":"xxxxx","time":"2017.09.12 20:55:36"}}}],"flushers":[{"type":"flusher_stdout"}]}` - defaultFlusherConfig = `{"type":"flusher_sls","detail":{}}` -) - -const ( - DeployDaemonset = "daemonset" - DeployStatefulSet = "statefulset" - DeploySingleton = "singleton" -) - -// flags used to control ilogtail. 
-var ( - GlobalConfig = flag.String("global", "./global.json", "global config.") - PluginConfig = flag.String("plugin", "./plugin.json", "plugin config.") - FlusherConfig = flag.String("flusher", "./default_flusher.json", "the default flusher configuration is used not only in the plugins without flusher but also to transfer the self telemetry data.") - ForceSelfCollect = flag.Bool("force-statics", false, "force collect self telemetry data before closing.") - AutoProfile = flag.Bool("prof-auto", true, "auto dump prof file when prof-flag is open.") - HTTPProfFlag = flag.Bool("prof-flag", false, "http pprof flag.") - Cpuprofile = flag.String("cpu-profile", "cpu.prof", "write cpu profile to file.") - Memprofile = flag.String("mem-profile", "mem.prof", "write mem profile to file.") - HTTPAddr = flag.String("server", ":18689", "http server address.") - Doc = flag.Bool("doc", false, "generate plugin docs") - DocPath = flag.String("docpath", "./docs/en/plugins", "generate plugin docs") - HTTPLoadFlag = flag.Bool("http-load", false, "export http endpoint for load plugin config.") - FileIOFlag = flag.Bool("file-io", false, "use file for input or output.") - InputFile = flag.String("input-file", "./input.log", "input file") - InputField = flag.String("input-field", "content", "input file") - InputLineLimit = flag.Int("input-line-limit", 1000, "input file") - OutputFile = flag.String("output-file", "./output.log", "output file") - DeployMode = flag.String("DEPLOY_MODE", DeployDaemonset, "alibaba log deploy mode, daemonset or statefulset or singleton") - StatefulSetFlag = flag.Bool("ALICLOUD_LOG_STATEFULSET_FLAG", false, "alibaba log export ports flag, set true if you want to use it") - EnableKubernetesMeta = flag.Bool("ENABLE_KUBERNETES_META", false, "enable kubernetes meta") -) - -var ( - flusherType string - flusherCfg map[string]interface{} - flusherLoadOnce sync.Once -) - -// LoadConfig read the plugin content. -func LoadConfig() (globalCfg string, pluginCfgs []string, err error) { - if gCfg, errRead := os.ReadFile(*GlobalConfig); errRead != nil { - globalCfg = defaultGlobalConfig - } else { - globalCfg = string(gCfg) - } - - if !json.Valid([]byte(globalCfg)) { - err = fmt.Errorf("illegal input global config:%s", globalCfg) - return - } - - var pluginCfg string - if pCfg, errRead := os.ReadFile(*PluginConfig); errRead == nil { - pluginCfg = string(pCfg) - } else { - pluginCfg = defaultPluginConfig - } - - if !json.Valid([]byte(pluginCfg)) { - err = fmt.Errorf("illegal input plugin config:%s", pluginCfg) - return - } - - var cfgs []map[string]interface{} - errUnmarshal := json.Unmarshal([]byte(pluginCfg), &cfgs) - if errUnmarshal != nil { - pluginCfgs = append(pluginCfgs, changePluginConfigIO(pluginCfg)) - return - } - for _, cfg := range cfgs { - bytes, _ := json.Marshal(cfg) - pluginCfgs = append(pluginCfgs, changePluginConfigIO(string(bytes))) - } - return -} - -// GetFlusherConfiguration returns the flusher category and options. 
-func GetFlusherConfiguration() (flusherCategory string, flusherOptions map[string]interface{}) { - flusherLoadOnce.Do(func() { - extract := func(cfg []byte) (string, map[string]interface{}, bool) { - m := make(map[string]interface{}) - err := json.Unmarshal(cfg, &m) - if err != nil { - logger.Error(context.Background(), "DEFAULT_FLUSHER_ALARM", "err", err) - return "", nil, false - } - c, ok := m["type"].(string) - if !ok { - return "", nil, false - } - options, ok := m["detail"].(map[string]interface{}) - if !ok { - return c, nil, true - } - return c, options, true - } - if fCfg, err := os.ReadFile(*FlusherConfig); err == nil { - category, options, ok := extract(fCfg) - if ok { - flusherType = category - flusherCfg = options - } else { - flusherType, flusherCfg, _ = extract([]byte(defaultFlusherConfig)) - } - } else { - flusherType, flusherCfg, _ = extract([]byte(defaultFlusherConfig)) - } - - }) - return flusherType, flusherCfg -} - -func OverrideByEnv() { - _ = util.InitFromEnvBool("LOGTAIL_DEBUG_FLAG", HTTPProfFlag, *HTTPProfFlag) - _ = util.InitFromEnvBool("LOGTAIL_AUTO_PROF", AutoProfile, *AutoProfile) - _ = util.InitFromEnvBool("LOGTAIL_FORCE_COLLECT_SELF_TELEMETRY", ForceSelfCollect, *ForceSelfCollect) - _ = util.InitFromEnvBool("LOGTAIL_HTTP_LOAD_CONFIG", HTTPLoadFlag, *HTTPLoadFlag) - _ = util.InitFromEnvBool("ALICLOUD_LOG_STATEFULSET_FLAG", StatefulSetFlag, *StatefulSetFlag) - _ = util.InitFromEnvString("DEPLOY_MODE", DeployMode, *DeployMode) - _ = util.InitFromEnvBool("ENABLE_KUBERNETES_META", EnableKubernetesMeta, *EnableKubernetesMeta) -} - -type pipelineConfig struct { - Inputs []interface{} `json:"inputs"` - Processors []interface{} `json:"processors"` - Aggregators []interface{} `json:"aggregators"` - Flushers []interface{} `json:"flushers"` -} - -var ( - fileInput = map[string]interface{}{ - "type": "metric_debug_file", - "detail": map[string]interface{}{ - "InputFilePath": "./input.log", - "FieldName": "content", - "LineLimit": 1000, - }, - } - fileOutput = map[string]interface{}{ - "type": "flusher_stdout", - "detail": map[string]interface{}{ - "FileName": "./output.log", - }, - } -) - -func changePluginConfigIO(pluginCfg string) string { - if *FileIOFlag { - var newCfg pipelineConfig - if err := json.Unmarshal([]byte(pluginCfg), &newCfg); err == nil { - // Input - fileInput["detail"].(map[string]interface{})["InputFilePath"] = *InputFile - fileInput["detail"].(map[string]interface{})["FieldName"] = *InputField - fileInput["detail"].(map[string]interface{})["LineLimit"] = *InputLineLimit - newCfg.Inputs = []interface{}{fileInput} - // Processors - if newCfg.Processors == nil { - newCfg.Processors = make([]interface{}, 0) - } - // Aggregators - if newCfg.Aggregators == nil { - newCfg.Aggregators = make([]interface{}, 0) - } - // Flushers - fileOutput["detail"].(map[string]interface{})["FileName"] = *OutputFile - newCfg.Flushers = append(newCfg.Flushers, fileOutput) - - cfg, _ := json.Marshal(newCfg) - pluginCfg = string(cfg) - } else { - logger.Error(context.Background(), "PLUGIN_UNMARSHAL_ALARM", "err", err) - } - return pluginCfg - } - return pluginCfg -} diff --git a/plugin_main/plugin_export.go b/plugin_main/plugin_export.go index e306c7d305..ed5465656d 100644 --- a/plugin_main/plugin_export.go +++ b/plugin_main/plugin_export.go @@ -25,11 +25,11 @@ import ( "unsafe" "github.com/alibaba/ilogtail/pkg/config" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/helper/k8smeta" 
"github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/util" - "github.com/alibaba/ilogtail/plugin_main/flags" "github.com/alibaba/ilogtail/pluginmanager" ) @@ -290,9 +290,9 @@ func GetContainerMeta(containerID string) *C.struct_containerMeta { return returnStruct } -//export GetPipelineMetrics -func GetPipelineMetrics() *C.PluginMetrics { - results := pluginmanager.GetMetrics() +//export GetGoMetrics +func GetGoMetrics(metricType string) *C.PluginMetrics { + results := pluginmanager.GetMetrics(metricType) // 统计所有键值对的总数,用于分配内存 numMetrics := len(results) diff --git a/plugin_main/plugin_http.go b/plugin_main/plugin_http.go index 062a414bcd..de90d8e171 100644 --- a/plugin_main/plugin_http.go +++ b/plugin_main/plugin_http.go @@ -28,8 +28,8 @@ import ( "time" "github.com/alibaba/ilogtail/pkg/config" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/logger" - "github.com/alibaba/ilogtail/plugin_main/flags" "github.com/alibaba/ilogtail/pluginmanager" ) diff --git a/plugin_main/plugin_main.go b/plugin_main/plugin_main.go index c86127eec5..9387074c7f 100644 --- a/plugin_main/plugin_main.go +++ b/plugin_main/plugin_main.go @@ -16,21 +16,116 @@ package main import ( "context" + "encoding/json" "flag" "fmt" + "os" "runtime" "github.com/alibaba/ilogtail/pkg/doc" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/helper/k8smeta" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/signals" "github.com/alibaba/ilogtail/pkg/util" - "github.com/alibaba/ilogtail/plugin_main/flags" _ "github.com/alibaba/ilogtail/plugin_main/wrapmemcpy" _ "github.com/alibaba/ilogtail/plugins/all" ) +// LoadConfig read the plugin content. +func LoadConfigPurPlugin() (globalCfg string, pluginCfgs []string, err error) { + if gCfg, errRead := os.ReadFile(*flags.GlobalConfig); errRead != nil { + globalCfg = flags.DefaultGlobalConfig + } else { + globalCfg = string(gCfg) + } + + if !json.Valid([]byte(globalCfg)) { + err = fmt.Errorf("illegal input global config:%s", globalCfg) + return + } + + var pluginCfg string + if pCfg, errRead := os.ReadFile(*flags.PluginConfig); errRead == nil { + pluginCfg = string(pCfg) + } else { + pluginCfg = flags.DefaultPluginConfig + } + + if !json.Valid([]byte(pluginCfg)) { + err = fmt.Errorf("illegal input plugin config:%s", pluginCfg) + return + } + + var cfgs []map[string]interface{} + errUnmarshal := json.Unmarshal([]byte(pluginCfg), &cfgs) + if errUnmarshal != nil { + pluginCfgs = append(pluginCfgs, changePluginConfigIO(pluginCfg)) + return + } + for _, cfg := range cfgs { + bytes, _ := json.Marshal(cfg) + pluginCfgs = append(pluginCfgs, changePluginConfigIO(string(bytes))) + } + return +} + +type pipelineConfig struct { + Inputs []interface{} `json:"inputs"` + Processors []interface{} `json:"processors"` + Aggregators []interface{} `json:"aggregators"` + Flushers []interface{} `json:"flushers"` +} + +var ( + fileInput = map[string]interface{}{ + "type": "metric_debug_file", + "detail": map[string]interface{}{ + "InputFilePath": "./input.log", + "FieldName": "content", + "LineLimit": 1000, + }, + } + fileOutput = map[string]interface{}{ + "type": "flusher_stdout", + "detail": map[string]interface{}{ + "FileName": "./output.log", + }, + } +) + +func changePluginConfigIO(pluginCfg string) string { + if *flags.FileIOFlag { + var newCfg pipelineConfig + if err := json.Unmarshal([]byte(pluginCfg), &newCfg); err == nil { + // Input + 
fileInput["detail"].(map[string]interface{})["InputFilePath"] = *flags.InputFile + fileInput["detail"].(map[string]interface{})["FieldName"] = *flags.InputField + fileInput["detail"].(map[string]interface{})["LineLimit"] = *flags.InputLineLimit + newCfg.Inputs = []interface{}{fileInput} + // Processors + if newCfg.Processors == nil { + newCfg.Processors = make([]interface{}, 0) + } + // Aggregators + if newCfg.Aggregators == nil { + newCfg.Aggregators = make([]interface{}, 0) + } + // Flushers + fileOutput["detail"].(map[string]interface{})["FileName"] = *flags.OutputFile + newCfg.Flushers = append(newCfg.Flushers, fileOutput) + + cfg, _ := json.Marshal(newCfg) + pluginCfg = string(cfg) + } else { + logger.Error(context.Background(), "PLUGIN_UNMARSHAL_ALARM", "err", err) + } + return pluginCfg + } + return pluginCfg +} + // main export http control method in pure GO. func main() { flag.Parse() @@ -46,7 +141,7 @@ func main() { fmt.Println("hostIP : ", util.GetIPAddress()) fmt.Printf("load config %s %s %s\n", *flags.GlobalConfig, *flags.PluginConfig, *flags.FlusherConfig) - globalCfg, pluginCfgs, err := flags.LoadConfig() + globalCfg, pluginCfgs, err := LoadConfigPurPlugin() fmt.Println("global config : ", globalCfg) fmt.Println("plugin config : ", pluginCfgs) if err != nil { diff --git a/pluginmanager/logtail_port_manager.go b/pluginmanager/logtail_port_manager.go index 6dc40b6972..e96aa0b8f7 100644 --- a/pluginmanager/logtail_port_manager.go +++ b/pluginmanager/logtail_port_manager.go @@ -24,8 +24,8 @@ import ( "strconv" "strings" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/logger" - "github.com/alibaba/ilogtail/plugin_main/flags" ) func getExcludePorts() []int { diff --git a/pluginmanager/logtail_port_manager_test.go b/pluginmanager/logtail_port_manager_test.go index 611a95b557..72e34894a3 100644 --- a/pluginmanager/logtail_port_manager_test.go +++ b/pluginmanager/logtail_port_manager_test.go @@ -26,8 +26,8 @@ import ( "github.com/stretchr/testify/suite" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/logger" - "github.com/alibaba/ilogtail/plugin_main/flags" ) func TestLogtailPortManager(t *testing.T) { diff --git a/pluginmanager/metric_export.go b/pluginmanager/metric_export.go index 0ded2280d5..43bfbd61fa 100644 --- a/pluginmanager/metric_export.go +++ b/pluginmanager/metric_export.go @@ -19,20 +19,68 @@ import ( "strings" ) -func GetMetrics() []map[string]string { +const ( + MetricExportTypeGo = "direct" + MetricExportTypeCpp = "cpp_provided" +) + +func GetMetrics(metricType string) []map[string]string { + if metricType == MetricExportTypeGo { + return GetGoDirectMetrics() + } + if metricType == MetricExportTypeCpp { + return GetGoCppProvidedMetrics() + } + return []map[string]string{} +} + +// 直接输出的go指标,例如go插件指标 +// +// []map[string]string{ +// { +// "label.plugin_name": "processor_test", +// "value.proc_in_records_total": "100", +// }, +// { +// "label.plugin_name": "flusher_stdout", +// "value.flusher_in_records_total": "100", +// }, +// } +func GetGoDirectMetrics() []map[string]string { metrics := make([]map[string]string, 0) - LogtailConfig.Range(func(key, value interface{}) bool { - config := value.(*LogstoreConfig) + // go plugin metrics + metrics = append(metrics, GetGoPluginMetrics()...) 
+	return metrics
+}
+
+// Metrics whose keys are defined by the C++ side; Go only passes the values over,
+// e.g. Go process-level metrics
+//
+//	[]map[string]string{
+//		{
+//			"agent_go_memory_used_mb": "100",
+//			"agent_go_routines_total": "20"
+//		}
+//	}
+func GetGoCppProvidedMetrics() []map[string]string {
+	metrics := make([]map[string]string, 0)
+	// agent-level metrics
+	metrics = append(metrics, GetAgentStat()...)
+	return metrics
+}
+
+// Go plugin metrics, exported directly
+func GetGoPluginMetrics() []map[string]string {
+	metrics := make([]map[string]string, 0)
+	for _, config := range LogtailConfig {
 		metrics = append(metrics, config.Context.ExportMetricRecords()...)
-		return true
-	})
-	metrics = append(metrics, GetAgentStat())
+	}
 	return metrics
 }
 
-func GetAgentStat() map[string]string {
-	recrods := map[string]string{}
-	recrods["metric-level"] = "agent"
+// Go process-level metrics, registered by the C++ side
+func GetAgentStat() []map[string]string {
+	metrics := []map[string]string{}
+	metric := map[string]string{}
 	// key is the metric key in runtime/metrics, value is agent's metric key
 	metricNames := map[string]string{
 		// cpu
@@ -52,20 +100,22 @@
 	// push results to records
 	for _, sample := range samples {
-		recordName := metricNames[sample.Name]
-		recordValue := sample.Value
-		recordValueString := ""
-		switch recordValue.Kind() {
+		key := metricNames[sample.Name]
+		value := sample.Value
+		valueStr := ""
+		switch value.Kind() {
 		case goruntimemetrics.KindUint64:
-			if strings.HasSuffix(recordName, "_mb") {
-				recordValueString = strconv.FormatUint(recordValue.Uint64()/1024/1024, 10)
+			if strings.HasSuffix(key, "_mb") {
+				valueStr = strconv.FormatUint(value.Uint64()/1024/1024, 10)
 			} else {
-				recordValueString = strconv.FormatUint(recordValue.Uint64(), 10)
+				valueStr = strconv.FormatUint(value.Uint64(), 10)
 			}
 		case goruntimemetrics.KindFloat64:
-			recordValueString = strconv.FormatFloat(recordValue.Float64(), 'g', -1, 64)
+			valueStr = strconv.FormatFloat(value.Float64(), 'g', -1, 64)
 		}
-		recrods[recordName] = recordValueString
+		metric[key] = valueStr
 	}
-	return recrods
+
+	metrics = append(metrics, metric)
+	return metrics
 }
diff --git a/pluginmanager/plugin_manager.go b/pluginmanager/plugin_manager.go
index 8ef19ec609..b7cd262165 100644
--- a/pluginmanager/plugin_manager.go
+++ b/pluginmanager/plugin_manager.go
@@ -23,10 +23,10 @@ import (
 	"time"
 
 	"github.com/alibaba/ilogtail/pkg/config"
+	"github.com/alibaba/ilogtail/pkg/flags"
 	"github.com/alibaba/ilogtail/pkg/helper"
 	"github.com/alibaba/ilogtail/pkg/logger"
 	"github.com/alibaba/ilogtail/pkg/pipeline"
-	"github.com/alibaba/ilogtail/plugin_main/flags"
 )
 
 // Following variables are exported so that tests of main package can reference them.
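(For reference, a minimal sketch of how the two export paths above might be consumed; GetMetrics and the MetricExportType* constants are the ones defined in metric_export.go above, while the main wrapper and the printing are illustrative assumptions, not part of the patch.)

	package main

	import (
		"fmt"

		"github.com/alibaba/ilogtail/pluginmanager"
	)

	func main() {
		// "direct" returns per-plugin metrics exported by the Go plugins themselves;
		// "cpp_provided" returns agent-level Go runtime metrics whose keys are
		// registered by the C++ side. Any other metric type yields an empty slice.
		for _, metricType := range []string{pluginmanager.MetricExportTypeGo, pluginmanager.MetricExportTypeCpp} {
			for _, record := range pluginmanager.GetMetrics(metricType) {
				for key, value := range record {
					fmt.Printf("%s: %s=%s\n", metricType, key, value)
				}
			}
		}
	}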
diff --git a/pluginmanager/plugin_runner_v1.go b/pluginmanager/plugin_runner_v1.go index d93238e068..5076457043 100644 --- a/pluginmanager/plugin_runner_v1.go +++ b/pluginmanager/plugin_runner_v1.go @@ -17,12 +17,12 @@ package pluginmanager import ( "time" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/pkg/util" - "github.com/alibaba/ilogtail/plugin_main/flags" ) type pluginv1Runner struct { diff --git a/plugins/flusher/prometheus/config.go b/plugins/flusher/prometheus/config.go new file mode 100644 index 0000000000..43da9e5bac --- /dev/null +++ b/plugins/flusher/prometheus/config.go @@ -0,0 +1,22 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +type config struct { + // RemoteURL to request + Endpoint string `validate:"required,http_url" json:"Endpoint"` + // Max size of timeseries slice for prometheus remote write request, default is 1000 + SeriesLimit int `validate:"number" json:"SeriesLimit,omitempty"` +} diff --git a/plugins/flusher/prometheus/def.go b/plugins/flusher/prometheus/def.go new file mode 100644 index 0000000000..7ccb369008 --- /dev/null +++ b/plugins/flusher/prometheus/def.go @@ -0,0 +1,47 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "time" +) + +const ( + defaultTimeout = 1 * time.Minute + defaultSeriesLimit = 1000 + defaultConcurrency = 2 + defaultMaxConnsPerHost = 50 + defaultMaxIdleConnsPerHost = 50 + defaultIdleConnTimeout = 90 * time.Second + defaultWriteBufferSize = 64 * 1024 + defaultQueueCapacity = 1024 +) + +const ( + headerKeyUserAgent = "User-Agent" + headerValUserAgent = "oneagent prometheus remote write flusher" + + headerKeyContentType = "Content-Type" + headerValContentType = "application/x-protobuf" + + headerKeyContentEncoding = "Content-Encoding" + headerValContentEncoding = "snappy" + + headerKeyPromRemoteWriteVersion = "X-Prometheus-Remote-Write-Version" + headerValPromRemoteWriteVersion = "0.1.0" +) + +var errNoHTTPFlusher = errors.New("no http flusher instance in prometheus flusher instance") diff --git a/plugins/flusher/prometheus/flusher_prometheus.go b/plugins/flusher/prometheus/flusher_prometheus.go new file mode 100644 index 0000000000..aa75f53c9e --- /dev/null +++ b/plugins/flusher/prometheus/flusher_prometheus.go @@ -0,0 +1,191 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + + "github.com/alibaba/ilogtail/pkg/logger" + "github.com/alibaba/ilogtail/pkg/models" + "github.com/alibaba/ilogtail/pkg/pipeline" + "github.com/alibaba/ilogtail/pkg/pipeline/extensions" + "github.com/alibaba/ilogtail/plugins/flusher/http" +) + +var _ pipeline.FlusherV2 = (*FlusherPrometheus)(nil) + +type FlusherPrometheus struct { + config // config for flusher_prometheus only (not belongs to flusher_http) + *http.FlusherHTTP // with config for flusher_http as well + + ctx pipeline.Context +} + +func NewPrometheusFlusher() *FlusherPrometheus { + httpFlusher := http.NewHTTPFlusher() + httpFlusher.DropEventWhenQueueFull = true + + return &FlusherPrometheus{ + FlusherHTTP: httpFlusher, + } +} + +func (p *FlusherPrometheus) Init(context pipeline.Context) error { + p.ctx = context + if p.FlusherHTTP == nil { + logger.Debugf(p.ctx.GetRuntimeContext(), "prometheus flusher (%s) has no http flusher instance", p.Description()) + p.FlusherHTTP = http.NewHTTPFlusher() + } + + if err := p.prepareInit(); err != nil { + logger.Errorf(p.ctx.GetRuntimeContext(), "PROMETHEUS_FLUSHER_INIT_ALARM", + "prometheus flusher prepare init failed, error: %s", err.Error()) + return err + } + + if err := p.FlusherHTTP.Init(context); err != nil { + logger.Errorf(p.ctx.GetRuntimeContext(), "PROMETHEUS_FLUSHER_INIT_ALARM", + "prometheus flusher init http flusher failed, error: %s", err.Error()) + return err + } + + logger.Infof(p.ctx.GetRuntimeContext(), "%s init success", p.Description()) + + return nil +} + +func (p *FlusherPrometheus) prepareInit() error { + if err := p.validateConfig(); err != nil { + return fmt.Errorf("validate config error: %w", err) + } + + p.trySetDefaultConfig() + p.initHTTPFlusherConfig() + + return nil +} + +func (p *FlusherPrometheus) validateConfig() error { + return 
getValidate().Struct(p.config) +} + +func (p *FlusherPrometheus) trySetDefaultConfig() { + cfg := &p.config + + if cfg.SeriesLimit <= 0 { + cfg.SeriesLimit = defaultSeriesLimit + } +} + +func (p *FlusherPrometheus) initHTTPFlusherConfig() { + hc := p.FlusherHTTP + + // Phase1. init http request config + hc.RemoteURL = p.Endpoint + if hc.Timeout <= 0 { + hc.Timeout = defaultTimeout + } + if hc.MaxConnsPerHost <= 0 { + hc.MaxConnsPerHost = defaultMaxConnsPerHost + } + if hc.MaxIdleConnsPerHost <= 0 { + hc.MaxIdleConnsPerHost = defaultMaxIdleConnsPerHost + } + if hc.IdleConnTimeout <= 0 { + hc.IdleConnTimeout = defaultIdleConnTimeout + } + if hc.WriteBufferSize <= 0 { + hc.WriteBufferSize = defaultWriteBufferSize + } + if hc.Headers == nil { + hc.Headers = make(map[string]string) + } + // according to VictoriaMetrics/app/vmagent/remotewrite/client.go, i.e. + // https://github.com/VictoriaMetrics/VictoriaMetrics/blob/v1.103.0/app/vmagent/remotewrite/client.go#L385-393 + hc.Headers[headerKeyUserAgent] = headerValUserAgent + hc.Headers[headerKeyContentType] = headerValContentType + hc.Headers[headerKeyContentEncoding] = headerValContentEncoding + hc.Headers[headerKeyPromRemoteWriteVersion] = headerValPromRemoteWriteVersion + + // Phase2. init http flusher inner config + if hc.Concurrency <= 0 { + hc.Concurrency = defaultConcurrency + } + if hc.QueueCapacity <= 0 { + hc.QueueCapacity = defaultQueueCapacity + } + + // Phase3. init pipeline group events handle config + if hc.Encoder == nil { + hc.Encoder = &extensions.ExtensionConfig{ + Type: "ext_default_encoder", + Options: map[string]any{"Format": "prometheus", "SeriesLimit": p.SeriesLimit}, + } + } + + // Phase4. mutate rule + // transport config refers to the configuration of vmagent: + // https://github.com/VictoriaMetrics/VictoriaMetrics/blob/v1.100.1/app/vmagent/remotewrite/client.go#L123-126 + // + // however, there is a little difference between flusher_http & vmagent: + // in vmagent, concurrency means number of concurrent queues to each -remoteWrite.url (multiple queues) + // in flusher_http, concurrency means how many goroutines consume data queue (single queue) + if numConns := 2 * hc.Concurrency; hc.MaxConnsPerHost < numConns { + hc.MaxConnsPerHost = numConns + } + if numConns := 2 * hc.Concurrency; hc.MaxIdleConnsPerHost < numConns { + hc.MaxIdleConnsPerHost = numConns + } +} + +func (p *FlusherPrometheus) Description() string { + return "prometheus flusher for ilogtail" +} + +func (p *FlusherPrometheus) IsReady(projectName string, logstoreName string, logstoreKey int64) bool { + if p.FlusherHTTP != nil { + return p.FlusherHTTP.IsReady(projectName, logstoreName, logstoreKey) + } + + return false +} + +func (p *FlusherPrometheus) SetUrgent(flag bool) { + if p.FlusherHTTP != nil { + p.FlusherHTTP.SetUrgent(flag) + } +} + +func (p *FlusherPrometheus) Stop() error { + if p.FlusherHTTP != nil { + return p.FlusherHTTP.Stop() + } + + return nil +} + +func (p *FlusherPrometheus) Export(events []*models.PipelineGroupEvents, context pipeline.PipelineContext) error { + if p.FlusherHTTP != nil { + return p.FlusherHTTP.Export(events, context) + } + + return errNoHTTPFlusher +} + +func init() { + pipeline.AddFlusherCreator("flusher_prometheus", func() pipeline.Flusher { + return NewPrometheusFlusher() + }) +} diff --git a/plugins/flusher/prometheus/flusher_prometheus_test.go b/plugins/flusher/prometheus/flusher_prometheus_test.go new file mode 100644 index 0000000000..f061f12046 --- /dev/null +++ 
b/plugins/flusher/prometheus/flusher_prometheus_test.go
@@ -0,0 +1,604 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/golang/snappy"
+	"github.com/jarcoal/httpmock"
+	"github.com/prometheus/prometheus/prompb"
+	. "github.com/smartystreets/goconvey/convey"
+
+	"github.com/alibaba/ilogtail/pkg/models"
+	"github.com/alibaba/ilogtail/pkg/pipeline/extensions"
+	"github.com/alibaba/ilogtail/pkg/protocol/encoder/prometheus"
+	_ "github.com/alibaba/ilogtail/plugins/extension/basicauth"
+	defaultencoder "github.com/alibaba/ilogtail/plugins/extension/default_encoder"
+	hf "github.com/alibaba/ilogtail/plugins/flusher/http"
+	"github.com/alibaba/ilogtail/plugins/test/mock"
+)
+
+// Scenario: plugin initialization
+// Factor: invalid config
+// Factor: the required field Endpoint is missing
+// Expectation: initialization fails
+func TestPrometheusFlusher_ShouldInitFailed_GivenEmptyEndpoint(t *testing.T) {
+	Convey("Given a prometheus flusher with empty Endpoint", t, func() {
+		flusher := &FlusherPrometheus{}
+
+		Convey("Then Init() should return error", func() {
+			err := flusher.Init(mock.NewEmptyContext("p", "l", "c"))
+			So(err, ShouldNotBeNil)
+
+			ready := flusher.IsReady("p", "l", 1)
+			So(ready, ShouldBeFalse)
+		})
+	})
+}
+
+// Scenario: plugin initialization
+// Factor: valid config
+// Factor: only the necessary config fields
+// Expectation: initialization succeeds
+func TestPrometheusFlusher_ShouldInitSuccess_GivenNecessaryConfig(t *testing.T) {
+	Convey("Given a prometheus flusher with necessary config", t, func() {
+		flusher := &FlusherPrometheus{
+			config: config{
+				Endpoint: "http://localhost:9090/write",
+			},
+		}
+
+		Convey("Then Init() should implement prometheus encoder successfully", func() {
+			err := flusher.Init(mock.NewEmptyContext("p", "l", "c"))
+			So(err, ShouldBeNil)
+
+			ready := flusher.IsReady("p", "l", 1)
+			So(ready, ShouldBeTrue)
+
+			Convey("extension should be *encoder.ExtensionDefaultEncoder", func() {
+				ext, err := flusher.ctx.GetExtension(flusher.Encoder.Type, flusher.Encoder.Options)
+				So(err, ShouldBeNil)
+
+				enc, ok := ext.(extensions.Encoder)
+				So(ok, ShouldBeTrue)
+
+				defEnc, ok := enc.(*defaultencoder.ExtensionDefaultEncoder)
+				So(ok, ShouldBeTrue)
+
+				Convey("encoder should be *prometheus.Encoder", func() {
+					promEnc, ok := defEnc.Encoder.(*prometheus.Encoder)
+					So(ok, ShouldBeTrue)
+
+					Convey("series limit should be default series limit", func() {
+						So(promEnc.SeriesLimit, ShouldEqual, defaultSeriesLimit)
+					})
+				})
+			})
+		})
+	})
+}
+
+// Scenario: write Prometheus metric data to RemoteStorage
+// Factor: data model V2
+// Factor: valid data
+// Factor: Basic Auth authentication scheme
+// Factor: Samples with identical labelsets are not aggregated (i.e. []prompb.Sample holds exactly 1 record)
+// Factor: all events in one *models.PipelineGroupEvents share the same tags (i.e. only one group of tags)
+// Expectation: write succeeds
+// PS:
+// 1. Samples with identical labelsets are not aggregated because, in real usage, a target is typically scraped every 30s, so there is usually only one timestamp per labelset
+// 2. For "one *models.PipelineGroupEvents": in real usage there are usually one to many groups of tags; here we first consider the case of only one group of tags
+func TestPrometheusFlusher_ShouldWriteToRemoteStorageSuccess_GivenCorrectDataWithV2Model_OnlyOneGroupOfTags(t *testing.T) {
+	Convey("Given correct data with []*models.PipelineGroupEvents type", t, func() {
+		var actualWriteRequests []*prompb.WriteRequest
+		endpoint := "http://localhost:9090/write"
+		expectedUsername, expectedPassword := "user", "password"
+
+		httpmock.Activate()
+		defer httpmock.DeactivateAndReset()
+
+		httpmock.RegisterResponder("POST", endpoint, func(req *http.Request) (*http.Response, error) {
+			username, password, err := parseBasicAuth(req.Header.Get("Authorization"))
+			if err != nil {
+				return httpmock.NewStringResponse(http.StatusUnauthorized, "Invalid Authorization"), fmt.Errorf("invalid authentication: %w", err)
+			}
+
+			if username != expectedUsername {
+				return httpmock.NewStringResponse(http.StatusUnauthorized, "Invalid Username"), fmt.Errorf("invalid username: %s", username)
+			}
+			if password != expectedPassword {
+				return httpmock.NewStringResponse(http.StatusForbidden, "Invalid Password"), fmt.Errorf("invalid password: %s", password)
+			}
+
+			if !validateHTTPHeader(req.Header) {
+				return httpmock.NewStringResponse(http.StatusBadRequest, "Invalid HTTP Header"), fmt.Errorf("invalid http header: %+v", req.Header)
+			}
+
+			wr, err := parsePromWriteRequest(req.Body)
+			if err != nil {
+				return httpmock.NewStringResponse(http.StatusBadRequest, "Invalid Write Request"), fmt.Errorf("parse prometheus write request error: %w", err)
+			}
+
+			actualWriteRequests = append(actualWriteRequests, wr)
+
+			return httpmock.NewStringResponse(http.StatusOK, "ok"), nil
+		})
+
+		flusher := &FlusherPrometheus{
+			config: config{
+				Endpoint:    endpoint,
+				SeriesLimit: 1024,
+			},
+			FlusherHTTP: hf.NewHTTPFlusher(),
+		}
+		flusher.Concurrency = 3
+		flusher.Authenticator = &extensions.ExtensionConfig{
+			Type: "ext_basicauth",
+			Options: map[string]any{
+				"Username": expectedUsername,
+				"Password": expectedPassword,
+			},
+		}
+
+		err := flusher.Init(mock.NewEmptyContext("p", "l", "c"))
+		So(err, ShouldBeNil)
+
+		mockMetadata := models.NewMetadata()
+		mockTags := models.NewTags()
+
+		Convey("Export data", func() {
+			groupEventsSlice := []*models.PipelineGroupEvents{
+				{
+					Group: models.NewGroup(mockMetadata, mockTags),
+					Events: []models.PipelineEvent{
+						models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label1", "value1"), 1234567890*1e6, 1.23),
+						models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label1", "value1"), 1234567891*1e6, 2.34),
+					},
+				},
+				{
+					Group: models.NewGroup(mockMetadata, mockTags),
+					Events: []models.PipelineEvent{
+						models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label2", "value2"), 1234567890*1e6, 1.23),
+						models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label2", "value2"), 1234567891*1e6, 2.34),
+					},
+				},
+			}
+
+			// expected prometheus metric samples:
+			// `test_metric{label1="value1"} 1.23 1234567890`,
+			// `test_metric{label1="value1"} 2.34 1234567891`,
+			// `test_metric{label2="value2"} 1.23 1234567890`,
+			// `test_metric{label2="value2"} 2.34 1234567891`,
+			expectedWriteRequest := []*prompb.WriteRequest{
+				{
+					Timeseries: []prompb.TimeSeries{
+						{
+							Labels: []prompb.Label{
+								{Name: "label1", Value: "value1"},
+								{Name: "__name__", Value: "test_metric"},
+							},
+							Samples: []prompb.Sample{
1234567890}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "label1", Value: "value1"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 2.34, Timestamp: 1234567891}, + }, + }, + }, + }, + { + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "label2", Value: "value2"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 1.23, Timestamp: 1234567890}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "label2", Value: "value2"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 2.34, Timestamp: 1234567891}, + }, + }, + }, + }, + } + expectedWriteRequest = sortPromLabelsInWriteRequest(expectedWriteRequest) + httpmock.ZeroCallCounters() + err := flusher.Export(groupEventsSlice, nil) + So(err, ShouldBeNil) + + err = flusher.Stop() + So(err, ShouldBeNil) + + Convey("each GroupEvents should send in a single request", func() { + So(httpmock.GetTotalCallCount(), ShouldEqual, 2) + }) + Convey("request body should be valid", func() { + sort.Sort(promWriteRequest(actualWriteRequests)) + actualWriteRequests = sortPromLabelsInWriteRequest(actualWriteRequests) // ensure that the slice order is consistent with expected + So(actualWriteRequests, ShouldResemble, expectedWriteRequest) + }) + }) + }) +} + +// 场景:Prometheus指标数据 写 RemoteStorage +// 因子:数据模型V2 +// 因子:正确的数据 +// 因子:Basic Auth鉴权方案 +// 因子:相同 labelsets 的 Sample 不聚合(i.e. []prompb.Sample 就 1 条记录) +// 因子:同一个 *models.PipelineGroupEvents,其 tags 不完全相同(i.e. 有多组tags) +// 预期:写成功 +// PS: +// 1. “相同 labelsets 的 Sample 不聚合”的原因:从实际使用场景看,一般每 30s 才抓一次点,所以一般时间戳只会有1个 +// 2. “同一个 *models.PipelineGroupEvents”,从实际使用场景看,一般有 1至多组 tags,这里考虑 多组tags 的情况 +func TestPrometheusFlusher_ShouldWriteToRemoteStorageSuccess_GivenCorrectDataWithV2Model_MultiGroupsOfTags(t *testing.T) { + Convey("Given correct data with []*models.PipelineGroupEvents type", t, func() { + var actualWriteRequests []*prompb.WriteRequest + endpoint := "http://localhost:9090/write" + expectedUsername, expectedPassword := "user", "password" + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("POST", endpoint, func(req *http.Request) (*http.Response, error) { + username, password, err := parseBasicAuth(req.Header.Get("Authorization")) + if err != nil { + return httpmock.NewStringResponse(http.StatusUnauthorized, "Invalid Authorization"), fmt.Errorf("invalid authentication: %w", err) + } + + if username != expectedUsername { + return httpmock.NewStringResponse(http.StatusUnauthorized, "Invalid Username"), fmt.Errorf("invalid username: %s", username) + } + if password != expectedPassword { + return httpmock.NewStringResponse(http.StatusForbidden, "Invalid Password"), fmt.Errorf("invalid password: %s", password) + } + + if !validateHTTPHeader(req.Header) { + return httpmock.NewStringResponse(http.StatusBadRequest, "Invalid HTTP Header"), fmt.Errorf("invalid http header: %+v", req.Header) + } + + wr, err := parsePromWriteRequest(req.Body) + if err != nil { + return httpmock.NewStringResponse(http.StatusBadRequest, "Invalid Write Request"), fmt.Errorf("parse prometheus write request error: %w", err) + } + + actualWriteRequests = append(actualWriteRequests, wr) + + return httpmock.NewStringResponse(http.StatusOK, "ok"), nil + }) + + flusher := &FlusherPrometheus{ + config: config{ + Endpoint: endpoint, + SeriesLimit: 1024, + }, + FlusherHTTP: hf.NewHTTPFlusher(), + } + flusher.Concurrency = 3 + flusher.Authenticator = &extensions.ExtensionConfig{ 
+ Type: "ext_basicauth", + Options: map[string]any{ + "Username": expectedUsername, + "Password": expectedPassword, + }, + } + + err := flusher.Init(mock.NewEmptyContext("p", "l", "c")) + So(err, ShouldBeNil) + + mockMetadata := models.NewMetadata() + mockTags := models.NewTags() + + Convey("Export data", func() { + groupEventsSlice := []*models.PipelineGroupEvents{ + { + Group: models.NewGroup(mockMetadata, mockTags), + Events: []models.PipelineEvent{ + models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label1", "value1"), 1234567890*1e6, 1.23), + models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label1", "value1"), 1234567891*1e6, 2.34), + models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label2", "value2"), 1234567890*1e6, 1.23), + models.NewSingleValueMetric("test_metric", models.MetricTypeGauge, models.NewTagsWithKeyValues("label2", "value2"), 1234567891*1e6, 2.34), + }, + }, + } + + // expected prometheus metric samples: + // `test_metric{label1="value1"} 1.23 1234567890`, + // `test_metric{label1="value1"} 2.34 1234567891`, + // `test_metric{label2="value2"} 1.23 1234567890`, + // `test_metric{label2="value2"} 2.34 1234567891`, + expectedWriteRequest := []*prompb.WriteRequest{ + { + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "label1", Value: "value1"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 1.23, Timestamp: 1234567890}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "label1", Value: "value1"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 2.34, Timestamp: 1234567891}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "label2", Value: "value2"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 1.23, Timestamp: 1234567890}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "label2", Value: "value2"}, + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []prompb.Sample{ + {Value: 2.34, Timestamp: 1234567891}, + }, + }, + }, + }, + } + expectedWriteRequest = sortPromLabelsInWriteRequest(expectedWriteRequest) + httpmock.ZeroCallCounters() + err := flusher.Export(groupEventsSlice, nil) + So(err, ShouldBeNil) + + err = flusher.Stop() + So(err, ShouldBeNil) + + Convey("each GroupEvents should send in a single request", func() { + So(httpmock.GetTotalCallCount(), ShouldEqual, 1) + }) + Convey("request body should be valid", func() { + So(actualWriteRequests, ShouldResemble, expectedWriteRequest) + }) + }) + }) +} + +// 场景:Prometheus指标数据 写 RemoteStorage +// 因子:数据模型V2 +// 因子:错误的数据 +// 因子:空指针/零值/非Metric数据 +// 因子:Basic Auth鉴权方案 +// 因子:相同 labelsets 的 Sample 不聚合(i.e. []prompb.Sample 就 1 条记录) +// 因子:同一个 *models.PipelineGroupEvents,其 tags 不完全相同(i.e. 有多组tags) +// 预期:不会向 RemoteStorage 发起写请求 +// PS: +// 1. “相同 labelsets 的 Sample 不聚合”的原因:从实际使用场景看,一般每 30s 才抓一次点,所以一般时间戳只会有1个 +// 2. 
“同一个 *models.PipelineGroupEvents”,从实际使用场景看,一般有 1至多组 tags,这里考虑 多组tags 的情况 +func TestPrometheusFlusher_ShouldNotWriteToRemoteStorage_GivenIncorrectDataWithV2Model_MultiGroupsOfTags(t *testing.T) { + Convey("Given incorrect data with []*models.PipelineGroupEvents type", t, func() { + var actualWriteRequests []*prompb.WriteRequest + endpoint := "http://localhost:9090/write" + expectedUsername, expectedPassword := "user", "password" + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("POST", endpoint, func(req *http.Request) (*http.Response, error) { + username, password, err := parseBasicAuth(req.Header.Get("Authorization")) + if err != nil { + return httpmock.NewStringResponse(http.StatusUnauthorized, "Invalid Authorization"), fmt.Errorf("invalid authentication: %w", err) + } + + if username != expectedUsername { + return httpmock.NewStringResponse(http.StatusUnauthorized, "Invalid Username"), fmt.Errorf("invalid username: %s", username) + } + if password != expectedPassword { + return httpmock.NewStringResponse(http.StatusForbidden, "Invalid Password"), fmt.Errorf("invalid password: %s", password) + } + + if !validateHTTPHeader(req.Header) { + return httpmock.NewStringResponse(http.StatusBadRequest, "Invalid HTTP Header"), fmt.Errorf("invalid http header: %+v", req.Header) + } + + wr, err := parsePromWriteRequest(req.Body) + if err != nil { + return httpmock.NewStringResponse(http.StatusBadRequest, "Invalid Write Request"), fmt.Errorf("parse prometheus write request error: %w", err) + } + + actualWriteRequests = append(actualWriteRequests, wr) + + return httpmock.NewStringResponse(http.StatusOK, "ok"), nil + }) + + flusher := &FlusherPrometheus{ + config: config{ + Endpoint: endpoint, + SeriesLimit: 1024, + }, + FlusherHTTP: hf.NewHTTPFlusher(), + } + flusher.Concurrency = 3 + flusher.Authenticator = &extensions.ExtensionConfig{ + Type: "ext_basicauth", + Options: map[string]any{ + "Username": expectedUsername, + "Password": expectedPassword, + }, + } + + err := flusher.Init(mock.NewEmptyContext("p", "l", "c")) + So(err, ShouldBeNil) + + mockMetadata := models.NewMetadata() + mockTags := models.NewTags() + + Convey("Export data", func() { + incorrectGroupEventsSlice := []*models.PipelineGroupEvents{ + nil, + {}, + { + Group: models.NewGroup(mockMetadata, mockTags), + Events: []models.PipelineEvent{}, + }, + { + Group: models.NewGroup(mockMetadata, mockTags), + Events: []models.PipelineEvent{ + nil, + models.NewSimpleLog(nil, nil, 0), + }, + }, + } + + var expectedWriteRequest []*prompb.WriteRequest + httpmock.ZeroCallCounters() + err := flusher.Export(incorrectGroupEventsSlice, nil) + So(err, ShouldBeNil) + + err = flusher.Stop() + So(err, ShouldBeNil) + + Convey("each GroupEvents should send in a single request", func() { + So(httpmock.GetTotalCallCount(), ShouldEqual, 0) + }) + Convey("request body should be valid", func() { + So(actualWriteRequests, ShouldResemble, expectedWriteRequest) + }) + }) + }) +} + +func parseBasicAuth(auth string) (string, string, error) { + if !strings.HasPrefix(auth, "Basic ") { + return "", "", fmt.Errorf("invalid authentication: %s", auth) + } + + decodedBytes, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return "", "", fmt.Errorf("base64 decode error: %w", err) + } + authBytes := bytes.Split(decodedBytes, []byte(":")) + if len(authBytes) != 2 { + return "", "", fmt.Errorf("invalid auth parts: %d", len(authBytes)) + } + + username, password := string(authBytes[0]), 
string(authBytes[1]) + return username, password, nil +} + +func validateHTTPHeader(header http.Header) bool { + if header == nil { + return false + } + + return header.Get(headerKeyUserAgent) == headerValUserAgent && + header.Get(headerKeyContentType) == headerValContentType && + header.Get(headerKeyContentEncoding) == headerValContentEncoding && + header.Get(headerKeyPromRemoteWriteVersion) == headerValPromRemoteWriteVersion +} + +func parsePromWriteRequest(r io.Reader) (*prompb.WriteRequest, error) { + compressedData, err := io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to read request body: %w", err) + } + + data, err := snappy.Decode(nil, compressedData) + if err != nil { + return nil, fmt.Errorf("failed to decode compressed data: %w", err) + } + + wr := new(prompb.WriteRequest) + if err := wr.Unmarshal(data); err != nil { + return nil, fmt.Errorf("failed to unmarshal prometheus write request: %w", err) + } + + return wr, nil +} + +type promWriteRequest []*prompb.WriteRequest + +func (p promWriteRequest) Len() int { + return len(p) +} + +func (p promWriteRequest) Less(i, j int) bool { + labelI := p[i].Timeseries[0].Labels[0] + if p[i].Timeseries[0].Labels[0].Name == "__name__" { + labelI = p[i].Timeseries[0].Labels[1] + } + + labelJ := p[j].Timeseries[0].Labels[0] + if p[j].Timeseries[0].Labels[0].Name == "__name__" { + labelJ = p[j].Timeseries[0].Labels[1] + } + + return labelI.Name < labelJ.Name +} + +func (p promWriteRequest) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type promLabels []prompb.Label + +func (p promLabels) Len() int { + return len(p) +} + +func (p promLabels) Less(i, j int) bool { + return p[i].Name < p[j].Name +} + +func (p promLabels) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func sortPromLabelsInWriteRequest(req []*prompb.WriteRequest) []*prompb.WriteRequest { + res := make([]*prompb.WriteRequest, 0, len(req)) + for _, w := range req { + for i := range w.Timeseries { + sort.Sort(promLabels(w.Timeseries[i].Labels)) + } + res = append(res, w) + } + + return res +} diff --git a/plugins/flusher/prometheus/validate.go b/plugins/flusher/prometheus/validate.go new file mode 100644 index 0000000000..9ddd07db04 --- /dev/null +++ b/plugins/flusher/prometheus/validate.go @@ -0,0 +1,34 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
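For orientation, the inverse of parsePromWriteRequest above — building a body the mock responder would accept — takes only a few lines. A minimal sketch (assuming the gogo-protobuf Marshal on prompb.WriteRequest, which the Unmarshal call above implies; the endpoint and credentials are the illustrative test values, not plugin API):

    wr := &prompb.WriteRequest{
        Timeseries: []prompb.TimeSeries{{
            Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}, {Name: "label1", Value: "value1"}},
            Samples: []prompb.Sample{{Value: 1.23, Timestamp: 1234567890}},
        }},
    }
    raw, _ := wr.Marshal()          // protobuf encoding
    body := snappy.Encode(nil, raw) // block-format snappy, matching snappy.Decode above
    req, _ := http.NewRequest(http.MethodPost, "http://localhost:9090/write", bytes.NewReader(body))
    req.SetBasicAuth("user", "password") // produces the header parseBasicAuth verifies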
diff --git a/plugins/flusher/prometheus/validate.go b/plugins/flusher/prometheus/validate.go
new file mode 100644
index 0000000000..9ddd07db04
--- /dev/null
+++ b/plugins/flusher/prometheus/validate.go
@@ -0,0 +1,34 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+    "sync"
+
+    "github.com/go-playground/validator/v10"
+)
+
+var (
+    validateOnce sync.Once
+    validateIns  *validator.Validate
+)
+
+func getValidate() *validator.Validate {
+    validateOnce.Do(func() {
+        validateIns = validator.New(validator.WithRequiredStructEnabled())
+    })
+
+    return validateIns
+}
diff --git a/plugins/input/kubernetesmetav2/meta_collector.go b/plugins/input/kubernetesmetav2/meta_collector.go
index d5c4f242a8..71fdbf0695 100644
--- a/plugins/input/kubernetesmetav2/meta_collector.go
+++ b/plugins/input/kubernetesmetav2/meta_collector.go
@@ -2,13 +2,16 @@ package kubernetesmetav2
 
 import (
     "context"
+    "encoding/json"
+
     // #nosec G501
     "crypto/md5"
     "fmt"
+    "strconv"
     "strings"
     "time"
 
-    v1 "k8s.io/api/core/v1"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
     "github.com/alibaba/ilogtail/pkg/logger"
@@ -19,32 +22,130 @@ import (
 
 type metaCollector struct {
     serviceK8sMeta *ServiceK8sMeta
-    processors     map[string][]ProcessFunc
     collector      pipeline.Collector
-    entityTypes    []string
 
     entityBuffer     chan models.PipelineEvent
     entityLinkBuffer chan models.PipelineEvent
 
-    stopCh chan struct{}
+    stopCh          chan struct{}
+    entityProcessor map[string]ProcessFunc
 }
 
 func (m *metaCollector) Start() error {
+    m.entityProcessor = map[string]ProcessFunc{
+        k8smeta.POD:                      m.processPodEntity,
+        k8smeta.NODE:                     m.processNodeEntity,
+        k8smeta.SERVICE:                  m.processServiceEntity,
+        k8smeta.DEPLOYMENT:               m.processDeploymentEntity,
+        k8smeta.REPLICASET:               m.processReplicaSetEntity,
+        k8smeta.DAEMONSET:                m.processDaemonSetEntity,
+        k8smeta.STATEFULSET:              m.processStatefulSetEntity,
+        k8smeta.CONFIGMAP:                m.processConfigMapEntity,
+        k8smeta.SECRET:                   m.processSecretEntity,
+        k8smeta.JOB:                      m.processJobEntity,
+        k8smeta.CRONJOB:                  m.processCronJobEntity,
+        k8smeta.NAMESPACE:                m.processNamespaceEntity,
+        k8smeta.PERSISTENTVOLUME:         m.processPersistentVolumeEntity,
+        k8smeta.PERSISTENTVOLUMECLAIM:    m.processPersistentVolumeClaimEntity,
+        k8smeta.STORAGECLASS:             m.processStorageClassEntity,
+        k8smeta.INGRESS:                  m.processIngressEntity,
+        k8smeta.POD_NODE:                 m.processPodNodeLink,
+        k8smeta.REPLICASET_DEPLOYMENT:    m.processReplicaSetDeploymentLink,
+        k8smeta.POD_REPLICASET:           m.processPodReplicaSetLink,
+        k8smeta.POD_STATEFULSET:          m.processPodStatefulSetLink,
+        k8smeta.POD_DAEMONSET:            m.processPodDaemonSetLink,
+        k8smeta.JOB_CRONJOB:              m.processJobCronJobLink,
+        k8smeta.POD_JOB:                  m.processPodJobLink,
+        k8smeta.POD_PERSISENTVOLUMECLAIN: m.processPodPVCLink,
+        k8smeta.POD_CONFIGMAP:            m.processPodConfigMapLink,
+        k8smeta.POD_SECRET:               m.processPodSecretLink,
+        k8smeta.POD_SERVICE:              m.processPodServiceLink,
+        k8smeta.POD_CONTAINER:            m.processPodContainerLink,
+    }
+
     if m.serviceK8sMeta.Pod {
         m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD, m.handleEvent, m.serviceK8sMeta.Interval)
-        m.processors[k8smeta.POD] = append(m.processors[k8smeta.POD], m.processPodEntity)
-        m.processors[k8smeta.POD] = append(m.processors[k8smeta.POD], m.processPodReplicasetLink)
-        m.entityTypes = append(m.entityTypes, k8smeta.POD)
+    }
+    if m.serviceK8sMeta.Node {
+        m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.NODE, m.handleEvent, m.serviceK8sMeta.Interval)
     }
     if m.serviceK8sMeta.Service {
         m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SERVICE, m.handleEvent, m.serviceK8sMeta.Interval)
-        m.processors[k8smeta.SERVICE] = append(m.processors[k8smeta.SERVICE], m.processServiceEntity)
-        m.entityTypes =
append(m.entityTypes, k8smeta.SERVICE) } - if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Service && m.serviceK8sMeta.PodServiceLink { - // link data will be collected in pod registry - m.processors[k8smeta.POD_SERVICE] = append(m.processors[k8smeta.POD_SERVICE], m.processPodServiceLink) - m.entityTypes = append(m.entityTypes, k8smeta.POD_SERVICE) + if m.serviceK8sMeta.Deployment { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DEPLOYMENT, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.ReplicaSet { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.REPLICASET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.DaemonSet { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DAEMONSET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.StatefulSet { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STATEFULSET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Configmap { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CONFIGMAP, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Secret { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SECRET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Job { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.JOB, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.CronJob { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CRONJOB, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Namespace { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.NAMESPACE, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.PersistentVolume { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUME, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.PersistentVolumeClaim { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUMECLAIM, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.StorageClass { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STORAGECLASS, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Ingress { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.INGRESS, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Node { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_NODE, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Deployment && m.serviceK8sMeta.ReplicaSet { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.REPLICASET_DEPLOYMENT, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.ReplicaSet && m.serviceK8sMeta.Pod { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_REPLICASET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.StatefulSet && m.serviceK8sMeta.Pod { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_STATEFULSET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.DaemonSet && m.serviceK8sMeta.Pod { + 
m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_DAEMONSET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.CronJob && m.serviceK8sMeta.Job { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.JOB_CRONJOB, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Job && m.serviceK8sMeta.Pod { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_JOB, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Pod && m.serviceK8sMeta.PersistentVolumeClaim { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_PERSISENTVOLUMECLAIN, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Configmap { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_CONFIGMAP, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Secret { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_SECRET, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Service && m.serviceK8sMeta.Pod { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_SERVICE, m.handleEvent, m.serviceK8sMeta.Interval) + } + if m.serviceK8sMeta.Pod { + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_CONTAINER, m.handleEvent, m.serviceK8sMeta.Interval) } go m.sendInBackground() return nil @@ -57,57 +158,149 @@ func (m *metaCollector) Stop() error { if m.serviceK8sMeta.Service { m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SERVICE) } - if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Service && m.serviceK8sMeta.PodServiceLink { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_SERVICE) + if m.serviceK8sMeta.Deployment { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DEPLOYMENT) + } + if m.serviceK8sMeta.DaemonSet { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DAEMONSET) + } + if m.serviceK8sMeta.StatefulSet { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STATEFULSET) + } + if m.serviceK8sMeta.Configmap { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CONFIGMAP) + } + if m.serviceK8sMeta.Secret { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SECRET) + } + if m.serviceK8sMeta.Job { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.JOB) + } + if m.serviceK8sMeta.CronJob { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CRONJOB) + } + if m.serviceK8sMeta.Namespace { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.NAMESPACE) + } + if m.serviceK8sMeta.PersistentVolume { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUME) + } + if m.serviceK8sMeta.PersistentVolumeClaim { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUMECLAIM) + } + if m.serviceK8sMeta.StorageClass { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STORAGECLASS) + } + if m.serviceK8sMeta.Ingress { + 
m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.INGRESS) } close(m.stopCh) return nil } -func (m *metaCollector) handleEvent(event *k8smeta.K8sMetaEvent) { - switch event.EventType { - case k8smeta.EventTypeAdd: - m.handleAdd(event) - case k8smeta.EventTypeUpdate: - m.handleUpdate(event) +func (m *metaCollector) handleEvent(event []*k8smeta.K8sMetaEvent) { + if len(event) == 0 { + return + } + switch event[0].EventType { + case k8smeta.EventTypeAdd, k8smeta.EventTypeUpdate: + for _, e := range event { + m.handleAddOrUpdate(e) + } case k8smeta.EventTypeDelete: - m.handleDelete(event) + for _, e := range event { + m.handleDelete(e) + } default: - logger.Error(context.Background(), "UNKNOWN_EVENT_TYPE", "unknown event type", event.EventType) + logger.Error(context.Background(), "UNKNOWN_EVENT_TYPE", "unknown event type", event[0].EventType) } } -func (m *metaCollector) handleAdd(event *k8smeta.K8sMetaEvent) { - if processors, ok := m.processors[event.Object.ResourceType]; ok { - for _, processor := range processors { - log := processor(event.Object, "create") - if log != nil { - m.send(log, isLink(event.Object.ResourceType)) +func (m *metaCollector) handleAddOrUpdate(event *k8smeta.K8sMetaEvent) { + if processor, ok := m.entityProcessor[event.Object.ResourceType]; ok { + logs := processor(event.Object, "Update") + for _, log := range logs { + m.send(log, !isEntity(event.Object.ResourceType)) + if isEntity(event.Object.ResourceType) { + link := m.generateEntityClusterLink(log) + m.send(link, true) } } } } -func (m *metaCollector) handleUpdate(event *k8smeta.K8sMetaEvent) { - if processors, ok := m.processors[event.Object.ResourceType]; ok { - for _, processor := range processors { - log := processor(event.Object, "update") - if log != nil { - m.send(log, isLink(event.Object.ResourceType)) +func (m *metaCollector) handleDelete(event *k8smeta.K8sMetaEvent) { + if processor, ok := m.entityProcessor[event.Object.ResourceType]; ok { + logs := processor(event.Object, "Expire") + for _, log := range logs { + m.send(log, isEntity(event.Object.ResourceType)) + if !isEntity(event.Object.ResourceType) { + link := m.generateEntityClusterLink(log) + m.send(link, true) } } } } -func (m *metaCollector) handleDelete(event *k8smeta.K8sMetaEvent) { - if processors, ok := m.processors[event.Object.ResourceType]; ok { - for _, processor := range processors { - log := processor(event.Object, "delete") - if log != nil { - m.send(log, isLink(event.Object.ResourceType)) - } - } +func (m *metaCollector) processEntityCommonPart(logContents models.LogContents, kind, namespace, name, method string, firstObservedTime, lastObservedTime int64, creationTime v1.Time) { + // entity reserved fields + logContents.Add(entityDomainFieldName, m.serviceK8sMeta.Domain) + logContents.Add(entityTypeFieldName, m.genEntityTypeKey(kind)) + logContents.Add(entityIDFieldName, m.genKey(namespace, name)) + logContents.Add(entityMethodFieldName, method) + + logContents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(firstObservedTime, 10)) + logContents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(lastObservedTime, 10)) + logContents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval*2), 10)) + logContents.Add(entityCategoryFieldName, defaultEntityCategory) + + // common custom fields + logContents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID) + logContents.Add(entityKindFieldName, kind) + logContents.Add(entityNameFieldName, name) + 
logContents.Add(entityCreationTimeFieldName, creationTime.Format(time.RFC3339)) +} + +func (m *metaCollector) processEntityLinkCommonPart(logContents models.LogContents, srcKind, srcNamespace, srcName, destKind, destNamespace, destName, method string, firstObservedTime, lastObservedTime int64) { + logContents.Add(entityLinkSrcDomainFieldName, m.serviceK8sMeta.Domain) + logContents.Add(entityLinkSrcEntityTypeFieldName, m.genEntityTypeKey(srcKind)) + logContents.Add(entityLinkSrcEntityIDFieldName, m.genKey(srcNamespace, srcName)) + + logContents.Add(entityLinkDestDomainFieldName, m.serviceK8sMeta.Domain) + logContents.Add(entityLinkDestEntityTypeFieldName, m.genEntityTypeKey(destKind)) + logContents.Add(entityLinkDestEntityIDFieldName, m.genKey(destNamespace, destName)) + + logContents.Add(entityMethodFieldName, method) + + logContents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(firstObservedTime, 10)) + logContents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(lastObservedTime, 10)) + logContents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval*2), 10)) + logContents.Add(entityCategoryFieldName, defaultEntityLinkCategory) + logContents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID) +} + +func (m *metaCollector) processEntityJSONObject(obj map[string]string) string { + if obj == nil { + return "{}" + } + objStr, err := json.Marshal(obj) + if err != nil { + logger.Error(context.Background(), "PROCESS_ENTITY_JSON_OBJECT_FAIL", "process entity json object fail", err) + return "{}" } + return string(objStr) +} + +func (m *metaCollector) processEntityJSONArray(obj []map[string]string) string { + if obj == nil { + return "[]" + } + objStr, err := json.Marshal(obj) + if err != nil { + logger.Error(context.Background(), "PROCESS_ENTITY_JSON_ARRAY_FAIL", "process entity json array fail", err) + return "[]" + } + return string(objStr) } func (m *metaCollector) send(event models.PipelineEvent, entity bool) { @@ -117,11 +310,7 @@ func (m *metaCollector) send(event models.PipelineEvent, entity bool) { } else { buffer = m.entityLinkBuffer } - select { - case buffer <- event: - case <-time.After(3 * time.Second): - logger.Warning(context.Background(), "SEND_EVENT_TIMEOUT", "send event timeout", event) - } + buffer <- event } func (m *metaCollector) sendInBackground() { @@ -135,6 +324,7 @@ func (m *metaCollector) sendInBackground() { } group.Events = group.Events[:0] } + lastSendClusterTime := time.Now() for { select { case e := <-m.entityBuffer: @@ -157,17 +347,83 @@ func (m *metaCollector) sendInBackground() { case <-m.stopCh: return } + if time.Since(lastSendClusterTime) > time.Duration(m.serviceK8sMeta.Interval)*time.Second { + // send cluster entity if in infra domain + if m.serviceK8sMeta.Domain == "infra" { + clusterEntity := m.generateClusterEntity() + m.collector.AddRawLog(convertPipelineEvent2Log(clusterEntity)) + lastSendClusterTime = time.Now() + } + } } } +func (m *metaCollector) genKey(namespace, name string) string { + key := m.serviceK8sMeta.clusterID + namespace + name + // #nosec G401 + return fmt.Sprintf("%x", md5.Sum([]byte(key))) +} + +func (m *metaCollector) generateClusterEntity() models.PipelineEvent { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + log.Contents.Add(entityDomainFieldName, m.serviceK8sMeta.Domain) + log.Contents.Add(entityTypeFieldName, "infra.k8s.cluster") + log.Contents.Add(entityIDFieldName, m.genKey("", "")) + 
log.Contents.Add(entityMethodFieldName, "Update")
+    log.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(time.Now().Unix(), 10))
+    log.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(time.Now().Unix(), 10))
+    log.Contents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval*2), 10))
+    log.Contents.Add(entityCategoryFieldName, defaultEntityCategory)
+    log.Contents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID)
+    return log
+}
+
+func (m *metaCollector) generateEntityClusterLink(entityEvent models.PipelineEvent) models.PipelineEvent {
+    content := entityEvent.(*models.Log).Contents
+    log := &models.Log{}
+    log.Contents = models.NewLogContents()
+    log.Contents.Add(entityLinkSrcDomainFieldName, m.serviceK8sMeta.Domain)
+    log.Contents.Add(entityLinkSrcEntityTypeFieldName, content.Get(entityTypeFieldName))
+    log.Contents.Add(entityLinkSrcEntityIDFieldName, content.Get(entityIDFieldName))
+
+    log.Contents.Add(entityLinkDestDomainFieldName, m.serviceK8sMeta.Domain)
+    log.Contents.Add(entityLinkDestEntityTypeFieldName, "ack.cluster")
+    log.Contents.Add(entityLinkDestEntityIDFieldName, m.serviceK8sMeta.clusterID)
+
+    log.Contents.Add(entityLinkRelationTypeFieldName, "runs")
+    log.Contents.Add(entityMethodFieldName, content.Get(entityMethodFieldName))
+
+    log.Contents.Add(entityFirstObservedTimeFieldName, content.Get(entityFirstObservedTimeFieldName))
+    log.Contents.Add(entityLastObservedTimeFieldName, content.Get(entityLastObservedTimeFieldName))
+    log.Contents.Add(entityKeepAliveSecondsFieldName, m.serviceK8sMeta.Interval*2)
+    log.Contents.Add(entityCategoryFieldName, defaultEntityLinkCategory)
+    log.Contents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID)
+    log.Timestamp = uint64(time.Now().Unix())
+    return log
+}
+
+func (m *metaCollector) genEntityTypeKey(kind string) string {
+    prefix := ""
+    if p, ok := DomainEntityTypePrefix[m.serviceK8sMeta.Domain]; ok {
+        prefix = p
+    }
+    return fmt.Sprintf("%s%s", prefix, strings.ToLower(kind))
+}
+
 func convertPipelineEvent2Log(event models.PipelineEvent) *protocol.Log {
     if modelLog, ok := event.(*models.Log); ok {
         log := &protocol.Log{}
         log.Contents = make([]*protocol.Log_Content, 0)
         for k, v := range modelLog.Contents.Iterator() {
             if _, ok := v.(string); !ok {
-                logger.Error(context.Background(), "COVERT_EVENT_TO_LOG_FAIL", "convert event to log fail, value is not string", v)
-                continue
+                if intValue, ok := v.(int); !ok {
+                    logger.Error(context.Background(), "COVERT_EVENT_TO_LOG_FAIL", "convert event to log fail, value is not string", v, "key", k)
+                    continue
+                } else {
+                    v = strconv.Itoa(intValue)
+                }
             }
             log.Contents = append(log.Contents, &protocol.Log_Content{Key: k, Value: v.(string)})
         }
@@ -177,20 +433,6 @@
     return nil
 }
 
-func genKeyByPod(pod *v1.Pod) string {
-    return genKey(pod.Namespace, pod.Kind, pod.Name)
-}
-
-func genKey(namespace, kind, name string) string {
-    key := namespace + kind + name
-    // #nosec G401
-    return fmt.Sprintf("%x", md5.Sum([]byte(key)))
-}
-
-func genKeyByService(service *v1.Service) string {
-    return genKey(service.Namespace, service.Kind, service.Name)
-}
-
-func isLink(resourceType string) bool {
-    return strings.Contains(resourceType, k8smeta.LINK_SPLIT_CHARACTER)
+func isEntity(resourceType string) bool {
+    return !strings.Contains(resourceType, k8smeta.LINK_SPLIT_CHARACTER)
 }
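The two helpers genKey and genEntityTypeKey are pure functions, so the identifiers they emit are easy to pin down. A worked sketch (the cluster ID "c1" and the object names are illustrative values only, not anything the plugin mandates):

    // genKey: stable entity ID = md5 hex digest over clusterID + namespace + name.
    // #nosec G401 -- md5 is used as an ID here, not as a security primitive.
    id := fmt.Sprintf("%x", md5.Sum([]byte("c1"+"default"+"nginx")))

    // genEntityTypeKey: domain prefix + lower-cased kind. With the "infra" domain,
    // kind "Pod" maps to "infra.k8s.cluster.pod" per DomainEntityTypePrefix.
    typeKey := DomainEntityTypePrefix["infra"] + strings.ToLower("Pod")
    _, _ = id, typeKey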
diff --git a/plugins/input/kubernetesmetav2/meta_collector_app.go b/plugins/input/kubernetesmetav2/meta_collector_app.go
new file mode 100644
index 0000000000..0a59ea1f4f
--- /dev/null
+++ b/plugins/input/kubernetesmetav2/meta_collector_app.go
@@ -0,0 +1,172 @@
+package kubernetesmetav2
+
+import (
+    "strconv"
+    "time"
+
+    app "k8s.io/api/apps/v1" //nolint:typecheck
+
+    "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
+    "github.com/alibaba/ilogtail/pkg/models"
+)
+
+func (m *metaCollector) processDeploymentEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*app.Deployment); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        log.Contents.Add("match_labels", m.processEntityJSONObject(obj.Spec.Selector.MatchLabels))
+        log.Contents.Add("replicas", strconv.FormatInt(int64(*obj.Spec.Replicas), 10))
+        log.Contents.Add("ready_replicas", strconv.FormatInt(int64(obj.Status.ReadyReplicas), 10))
+        containerInfos := []map[string]string{}
+        for _, container := range obj.Spec.Template.Spec.Containers {
+            containerInfo := map[string]string{
+                "name":  container.Name,
+                "image": container.Image,
+            }
+            containerInfos = append(containerInfos, containerInfo)
+        }
+        log.Contents.Add("containers", m.processEntityJSONArray(containerInfos))
+
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processDaemonSetEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*app.DaemonSet); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        log.Contents.Add("match_labels", m.processEntityJSONObject(obj.Spec.Selector.MatchLabels))
+        containerInfos := []map[string]string{}
+        for _, container := range obj.Spec.Template.Spec.Containers {
+            containerInfo := map[string]string{
+                "name":  container.Name,
+                "image": container.Image,
+            }
+            containerInfos = append(containerInfos, containerInfo)
+        }
+        log.Contents.Add("containers", m.processEntityJSONArray(containerInfos))
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processStatefulSetEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*app.StatefulSet); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels",
m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + log.Contents.Add("match_labels", m.processEntityJSONObject(obj.Spec.Selector.MatchLabels)) + log.Contents.Add("replicas", strconv.FormatInt(int64(*obj.Spec.Replicas), 10)) + containerInfos := []map[string]string{} + for _, container := range obj.Spec.Template.Spec.Containers { + containerInfo := map[string]string{ + "name": container.Name, + "image": container.Image, + } + containerInfos = append(containerInfos, containerInfo) + } + log.Contents.Add("containers", m.processEntityJSONArray(containerInfos)) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processReplicaSetEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*app.ReplicaSet); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + + // custom fields + log.Contents.Add("api_version", obj.APIVersion) + log.Contents.Add("namespace", obj.Namespace) + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + log.Contents.Add("match_labels", m.processEntityJSONObject(obj.Spec.Selector.MatchLabels)) + log.Contents.Add("replicas", strconv.FormatInt(int64(*obj.Spec.Replicas), 10)) + containerInfos := []map[string]string{} + for _, container := range obj.Spec.Template.Spec.Containers { + containerInfo := map[string]string{ + "name": container.Name, + "image": container.Image, + } + containerInfos = append(containerInfos, containerInfo) + } + log.Contents.Add("containers", m.processEntityJSONArray(containerInfos)) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processReplicaSetDeploymentLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*k8smeta.ReplicaSetDeployment); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + m.processEntityLinkCommonPart(log.Contents, obj.ReplicaSet.Kind, obj.ReplicaSet.Namespace, obj.ReplicaSet.Name, obj.Deployment.Kind, obj.Deployment.Namespace, obj.Deployment.Name, method, data.FirstObservedTime, data.LastObservedTime) + log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Timestamp = uint64(time.Now().Unix()) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processPodReplicaSetLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*k8smeta.PodReplicaSet); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.ReplicaSet.Kind, obj.ReplicaSet.Namespace, obj.ReplicaSet.Name, method, data.FirstObservedTime, data.LastObservedTime) + log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Timestamp = uint64(time.Now().Unix()) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processPodStatefulSetLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*k8smeta.PodStatefulSet); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, 
obj.Pod.Namespace, obj.Pod.Name, obj.StatefulSet.Kind, obj.StatefulSet.Namespace, obj.StatefulSet.Name, method, data.FirstObservedTime, data.LastObservedTime) + log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Timestamp = uint64(time.Now().Unix()) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processPodDaemonSetLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*k8smeta.PodDaemonSet); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.DaemonSet.Kind, obj.DaemonSet.Namespace, obj.DaemonSet.Name, method, data.FirstObservedTime, data.LastObservedTime) + log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Timestamp = uint64(time.Now().Unix()) + return []models.PipelineEvent{log} + } + return nil +} diff --git a/plugins/input/kubernetesmetav2/meta_collector_batch.go b/plugins/input/kubernetesmetav2/meta_collector_batch.go new file mode 100644 index 0000000000..93832ad4f3 --- /dev/null +++ b/plugins/input/kubernetesmetav2/meta_collector_batch.go @@ -0,0 +1,84 @@ +package kubernetesmetav2 + +import ( + "strconv" + "time" + + batch "k8s.io/api/batch/v1" //nolint:typecheck + + "github.com/alibaba/ilogtail/pkg/helper/k8smeta" + "github.com/alibaba/ilogtail/pkg/models" +) + +func (m *metaCollector) processJobEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*batch.Job); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + + // custom fields + log.Contents.Add("api_version", obj.APIVersion) + log.Contents.Add("namespace", obj.Namespace) + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + log.Contents.Add("status", obj.Status.String()) + containerInfos := []map[string]string{} + for _, container := range obj.Spec.Template.Spec.Containers { + containerInfo := map[string]string{ + "name": container.Name, + "image": container.Image, + } + containerInfos = append(containerInfos, containerInfo) + } + log.Contents.Add("containers", m.processEntityJSONArray(containerInfos)) + log.Contents.Add("suspend", strconv.FormatBool(*obj.Spec.Suspend)) + log.Contents.Add("backoff_limit", strconv.FormatInt(int64(*obj.Spec.BackoffLimit), 10)) + log.Contents.Add("completion", strconv.FormatInt(int64(*obj.Spec.Completions), 10)) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processCronJobEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*batch.CronJob); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + + // custom fields + log.Contents.Add("api_version", obj.APIVersion) + log.Contents.Add("namespace", obj.Namespace) + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + log.Contents.Add("schedule", obj.Spec.Schedule) + 
log.Contents.Add("suspend", strconv.FormatBool(*obj.Spec.Suspend)) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processJobCronJobLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*k8smeta.JobCronJob); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + m.processEntityLinkCommonPart(log.Contents, obj.Job.Kind, obj.Job.Namespace, obj.Job.Name, obj.CronJob.Kind, obj.CronJob.Namespace, obj.CronJob.Name, method, data.FirstObservedTime, data.LastObservedTime) + log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Timestamp = uint64(time.Now().Unix()) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processPodJobLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*k8smeta.PodJob); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.Job.Kind, obj.Job.Namespace, obj.Job.Name, method, data.FirstObservedTime, data.LastObservedTime) + log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Timestamp = uint64(time.Now().Unix()) + return []models.PipelineEvent{log} + } + return nil +} diff --git a/plugins/input/kubernetesmetav2/meta_collector_const.go b/plugins/input/kubernetesmetav2/meta_collector_const.go index 1c30c3a1ea..8c8020bdeb 100644 --- a/plugins/input/kubernetesmetav2/meta_collector_const.go +++ b/plugins/input/kubernetesmetav2/meta_collector_const.go @@ -1,16 +1,18 @@ package kubernetesmetav2 const ( - entityDomainFieldName = "__domain__" - entityTypeFieldName = "__entity_type__" - entityIDFieldName = "__entity_id__" - entityMethodFieldName = "__method__" + entityDomainFieldName = "__domain__" + entityTypeFieldName = "__entity_type__" + entityIDFieldName = "__entity_id__" + entityMethodFieldName = "__method__" + entityClusterIDFieldName = "cluster_id" + entityKindFieldName = "kind" + entityNameFieldName = "name" + entityCreationTimeFieldName = "create_time" entityFirstObservedTimeFieldName = "__first_observed_time__" entityLastObservedTimeFieldName = "__last_observed_time__" entityKeepAliveSecondsFieldName = "__keep_alive_seconds__" - entityNamespaceFieldName = "namespace" - entityNameFieldName = "name" entityCategoryFieldName = "__category__" defaultEntityCategory = "entity" @@ -24,3 +26,8 @@ const ( entityLinkDestEntityIDFieldName = "__dest_entity_id__" entityLinkRelationTypeFieldName = "__relation_type__" ) + +var DomainEntityTypePrefix = map[string]string{ + "acs": "acs.ack.cluster.", + "infra": "infra.k8s.cluster.", +} diff --git a/plugins/input/kubernetesmetav2/meta_collector_core.go b/plugins/input/kubernetesmetav2/meta_collector_core.go new file mode 100644 index 0000000000..50642c1ef6 --- /dev/null +++ b/plugins/input/kubernetesmetav2/meta_collector_core.go @@ -0,0 +1,332 @@ +package kubernetesmetav2 + +import ( + "encoding/json" + "strconv" + "time" + + v1 "k8s.io/api/core/v1" + + "github.com/alibaba/ilogtail/pkg/helper/k8smeta" + "github.com/alibaba/ilogtail/pkg/models" +) + +func (m *metaCollector) processPodEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + result := []models.PipelineEvent{} + if obj, ok := data.Raw.(*v1.Pod); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, 
data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + + // custom fields + log.Contents.Add("api_version", obj.APIVersion) + log.Contents.Add("namespace", obj.Namespace) + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + log.Contents.Add("status", string(obj.Status.Phase)) + log.Contents.Add("instance_ip", obj.Status.PodIP) + containerInfos := []map[string]string{} + for _, container := range obj.Spec.Containers { + containerInfo := map[string]string{ + "name": container.Name, + "image": container.Image, + } + containerInfos = append(containerInfos, containerInfo) + } + log.Contents.Add("containers", m.processEntityJSONArray(containerInfos)) + result = append(result, log) + + // container + if m.serviceK8sMeta.Container { + for _, container := range obj.Spec.Containers { + containerLog := &models.Log{} + containerLog.Contents = models.NewLogContents() + containerLog.Timestamp = log.Timestamp + + containerLog.Contents.Add(entityDomainFieldName, m.serviceK8sMeta.Domain) + containerLog.Contents.Add(entityTypeFieldName, m.genEntityTypeKey("container")) + containerLog.Contents.Add(entityIDFieldName, m.genKey(obj.Namespace, obj.Name+container.Name)) + containerLog.Contents.Add(entityMethodFieldName, method) + + containerLog.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(data.FirstObservedTime, 10)) + containerLog.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(data.LastObservedTime, 10)) + containerLog.Contents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval*2), 10)) + containerLog.Contents.Add(entityCategoryFieldName, defaultEntityCategory) + + // common custom fields + containerLog.Contents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID) + containerLog.Contents.Add(entityNameFieldName, container.Name) + + // custom fields + containerLog.Contents.Add("pod_name", obj.Name) + containerLog.Contents.Add("pod_namespace", obj.Namespace) + containerLog.Contents.Add("image", container.Image) + containerLog.Contents.Add("cpu_request", container.Resources.Requests.Cpu().String()) + containerLog.Contents.Add("cpu_limit", container.Resources.Limits.Cpu().String()) + containerLog.Contents.Add("memory_request", container.Resources.Requests.Memory().String()) + containerLog.Contents.Add("memory_limit", container.Resources.Limits.Memory().String()) + ports := make([]int32, 0) + for _, port := range container.Ports { + ports = append(ports, port.ContainerPort) + } + portsStr, _ := json.Marshal(ports) + if len(ports) == 0 { + portsStr = []byte("[]") + } + containerLog.Contents.Add("container_ports", string(portsStr)) + volumes := make([]map[string]string, 0) + for _, volume := range container.VolumeMounts { + volumeInfo := map[string]string{ + "volumeMountName": volume.Name, + "volumeMountPath": volume.MountPath, + } + volumes = append(volumes, volumeInfo) + } + containerLog.Contents.Add("volumes", m.processEntityJSONArray(volumes)) + result = append(result, containerLog) + } + } + return result + } + return nil +} + +func (m *metaCollector) processNodeEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*v1.Node); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, "", obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + 
+ // custom fields + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + status := []map[string]string{} + for _, condition := range obj.Status.Conditions { + conditionInfo := map[string]string{ + "type": string(condition.Type), + "status": string(condition.Status), + } + status = append(status, conditionInfo) + } + log.Contents.Add("status", m.processEntityJSONArray(status)) + for _, addr := range obj.Status.Addresses { + if addr.Type == v1.NodeInternalIP { + log.Contents.Add("internal_ip", addr.Address) + } else if addr.Type == v1.NodeHostName { + log.Contents.Add("host_name", addr.Address) + } + } + capacityStr, _ := json.Marshal(obj.Status.Capacity) + log.Contents.Add("capacity", string(capacityStr)) + allocatableStr, _ := json.Marshal(obj.Status.Allocatable) + log.Contents.Add("allocatable", string(allocatableStr)) + addressStr, _ := json.Marshal(obj.Status.Addresses) + log.Contents.Add("addresses", string(addressStr)) + log.Contents.Add("provider_id", obj.Spec.ProviderID) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processServiceEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*v1.Service); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + + // custom fields + log.Contents.Add("api_version", obj.APIVersion) + log.Contents.Add("namespace", obj.Namespace) + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + log.Contents.Add("selector", m.processEntityJSONObject(obj.Spec.Selector)) + log.Contents.Add("type", string(obj.Spec.Type)) + log.Contents.Add("cluster_ip", obj.Spec.ClusterIP) + ports := make([]map[string]string, 0) + for _, port := range obj.Spec.Ports { + portInfo := map[string]string{ + "port": strconv.FormatInt(int64(port.Port), 10), + "targetPort": strconv.FormatInt(int64(port.TargetPort.IntVal), 10), + "protocol": string(port.Protocol), + } + ports = append(ports, portInfo) + } + log.Contents.Add("ports", m.processEntityJSONArray(ports)) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processConfigMapEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*v1.ConfigMap); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp) + + // custom fields + log.Contents.Add("api_version", obj.APIVersion) + log.Contents.Add("namespace", obj.Namespace) + log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels)) + log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations)) + return []models.PipelineEvent{log} + } + return nil +} + +func (m *metaCollector) processSecretEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent { + if obj, ok := data.Raw.(*v1.Secret); ok { + log := &models.Log{} + log.Contents = models.NewLogContents() + log.Timestamp = uint64(time.Now().Unix()) + m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, 
data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        log.Contents.Add("type", string(obj.Type))
+
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processNamespaceEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*v1.Namespace); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, "", obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("kind", obj.Kind)
+        log.Contents.Add("name", obj.Name)
+        for k, v := range obj.Labels {
+            log.Contents.Add("label_"+k, v)
+        }
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPersistentVolumeEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*v1.PersistentVolume); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        log.Contents.Add("status", string(obj.Status.Phase))
+        log.Contents.Add("storage_class_name", obj.Spec.StorageClassName)
+        log.Contents.Add("persistent_volume_reclaim_policy", string(obj.Spec.PersistentVolumeReclaimPolicy))
+        log.Contents.Add("volume_mode", string(*obj.Spec.VolumeMode))
+        log.Contents.Add("capacity", obj.Spec.Capacity.Storage().String())
+        log.Contents.Add("fsType", obj.Spec.CSI.FSType)
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPersistentVolumeClaimEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*v1.PersistentVolumeClaim); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        log.Contents.Add("status", string(obj.Status.Phase))
+        log.Contents.Add("storage_requests", obj.Spec.Resources.Requests.Storage().String())
+        log.Contents.Add("storage_class_name", obj.Spec.StorageClassName)
+        log.Contents.Add("volume_name", obj.Spec.VolumeName)
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPodNodeLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.NodePod); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+func (m *metaCollector) processPodNodeLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.NodePod); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.Node.Kind, "", obj.Node.Name, method, data.FirstObservedTime, data.LastObservedTime)
+        log.Contents.Add(entityLinkRelationTypeFieldName, "related_to")
+        log.Timestamp = uint64(time.Now().Unix())
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPodPVCLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.PodPersistentVolumeClaim); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.PersistentVolumeClaim.Kind, obj.PersistentVolumeClaim.Namespace, obj.PersistentVolumeClaim.Name, method, data.FirstObservedTime, data.LastObservedTime)
+        log.Contents.Add(entityLinkRelationTypeFieldName, "related_to")
+        log.Timestamp = uint64(time.Now().Unix())
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPodConfigMapLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.PodConfigMap); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.ConfigMap.Kind, obj.ConfigMap.Namespace, obj.ConfigMap.Name, method, data.FirstObservedTime, data.LastObservedTime)
+        log.Contents.Add(entityLinkRelationTypeFieldName, "related_to")
+        log.Timestamp = uint64(time.Now().Unix())
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPodSecretLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.PodSecret); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.Secret.Kind, obj.Secret.Namespace, obj.Secret.Name, method, data.FirstObservedTime, data.LastObservedTime)
+        log.Contents.Add(entityLinkRelationTypeFieldName, "related_to")
+        log.Timestamp = uint64(time.Now().Unix())
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
+func (m *metaCollector) processPodServiceLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.PodService); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.Service.Kind, obj.Service.Namespace, obj.Service.Name, method, data.FirstObservedTime, data.LastObservedTime)
+        log.Contents.Add(entityLinkRelationTypeFieldName, "related_to")
+        log.Timestamp = uint64(time.Now().Unix())
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
+
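+// Containers are not standalone API objects, so the destination entity uses
+// the synthetic "container" type and inherits the pod's namespace.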
+func (m *metaCollector) processPodContainerLink(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*k8smeta.PodContainer); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, "container", obj.Pod.Namespace, obj.Container.Name, method, data.FirstObservedTime, data.LastObservedTime)
+        log.Contents.Add(entityLinkRelationTypeFieldName, "contains")
+        log.Timestamp = uint64(time.Now().Unix())
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
diff --git a/plugins/input/kubernetesmetav2/meta_collector_pod_test.go b/plugins/input/kubernetesmetav2/meta_collector_core_test.go
similarity index 77%
rename from plugins/input/kubernetesmetav2/meta_collector_pod_test.go
rename to plugins/input/kubernetesmetav2/meta_collector_core_test.go
index bdf3ff3946..e2c3930706 100644
--- a/plugins/input/kubernetesmetav2/meta_collector_pod_test.go
+++ b/plugins/input/kubernetesmetav2/meta_collector_core_test.go
@@ -4,6 +4,7 @@ import (
     "testing"
 
     "github.com/stretchr/testify/assert"
+    app "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
@@ -69,8 +70,40 @@ func TestProcessPodEntity(t *testing.T) {
     assert.NotNilf(t, log, "log should not be nil")
 }
 
+func TestProcessServiceEntity(t *testing.T) {
+    obj := &v1.Service{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "pod2",
+            Namespace: "ns2",
+            UID:       "uid2",
+        },
+        Spec: v1.ServiceSpec{
+            Ports: []v1.ServicePort{
+                {
+                    Name: "port1",
+                    Port: 80,
+                },
+                {
+                    Name: "port2",
+                    Port: 8080,
+                },
+            },
+        },
+    }
+    objWrapper := &k8smeta.ObjectWrapper{
+        Raw: obj,
+    }
+    collector := &metaCollector{
+        serviceK8sMeta: &ServiceK8sMeta{
+            Interval: 10,
+        },
+    }
+    log := collector.processServiceEntity(objWrapper, "create")
+    assert.NotNilf(t, log, "log should not be nil")
+}
+
 func TestProcessPodReplicasetLink(t *testing.T) {
-    obj := &v1.Pod{
+    pod := &v1.Pod{
     ObjectMeta: metav1.ObjectMeta{
         Name:      "pod2",
         Namespace: "ns2",
@@ -83,15 +116,24 @@ func TestProcessPodReplicasetLink(t *testing.T) {
         },
     }
+    replicaSet := &app.ReplicaSet{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "rs1",
+            Namespace: "ns2",
+        },
+    }
     objWrapper := &k8smeta.ObjectWrapper{
-        Raw: obj,
+        Raw: &k8smeta.PodReplicaSet{
+            Pod:        pod,
+            ReplicaSet: replicaSet,
+        },
     }
     collector := &metaCollector{
         serviceK8sMeta: &ServiceK8sMeta{
             Interval: 10,
         },
     }
-    log := collector.processPodReplicasetLink(objWrapper, "create")
+    log := collector.processPodReplicaSetLink(objWrapper, "create")
     assert.NotNilf(t, log, "log should not be nil")
 }
 
@@ -111,7 +153,7 @@ func TestProcessPodReplicasetLinkNoOwner(t *testing.T) {
             Interval: 10,
         },
     }
-    log := collector.processPodReplicasetLink(objWrapper, "create")
+    log := collector.processPodReplicaSetLink(objWrapper, "create")
     assert.Nilf(t, log, "log should not be nil")
 }
diff --git a/plugins/input/kubernetesmetav2/meta_collector_networking.go b/plugins/input/kubernetesmetav2/meta_collector_networking.go
new file mode 100644
index 0000000000..f5065b0057
--- /dev/null
+++ b/plugins/input/kubernetesmetav2/meta_collector_networking.go
@@ -0,0 +1,27 @@
+package kubernetesmetav2
+
+import (
+    "time"
+
+    networking "k8s.io/api/networking/v1" //nolint:typecheck
+
+    "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
+    "github.com/alibaba/ilogtail/pkg/models"
+)
+
+func (m *metaCollector) processIngressEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*networking.Ingress); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("namespace", obj.Namespace)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
diff --git a/plugins/input/kubernetesmetav2/meta_collector_pod.go b/plugins/input/kubernetesmetav2/meta_collector_pod.go
deleted file mode 100644
index a5b2ce5b65..0000000000
--- a/plugins/input/kubernetesmetav2/meta_collector_pod.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package kubernetesmetav2
-
-import (
-    "strconv"
-    "time"
-
-    v1 "k8s.io/api/core/v1"
-
-    "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
-    "github.com/alibaba/ilogtail/pkg/models"
-)
-
-func (m *metaCollector) processPodEntity(data *k8smeta.ObjectWrapper, method string) models.PipelineEvent {
-    if obj, ok := data.Raw.(*v1.Pod); ok {
-        log := &models.Log{}
-        log.Contents = models.NewLogContents()
-        log.Contents.Add(entityDomainFieldName, "k8s")
-        log.Contents.Add(entityTypeFieldName, "pod")
-        log.Contents.Add(entityIDFieldName, genKeyByPod(obj))
-        log.Contents.Add(entityMethodFieldName, method)
-
-        log.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(data.FirstObservedTime, 10))
-        log.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(data.LastObservedTime, 10))
-        log.Contents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval), 10))
-
-        log.Contents.Add(entityNamespaceFieldName, obj.Namespace)
-        log.Contents.Add(entityNameFieldName, obj.Name)
-        log.Contents.Add(entityCategoryFieldName, defaultEntityCategory)
-        log.Timestamp = uint64(time.Now().Unix())
-
-        // custom fields
-        log.Contents.Add("apiVersion", obj.APIVersion)
-        log.Contents.Add("kind", "pod")
-        log.Contents.Add("name", obj.Name)
-        log.Contents.Add("namespace", obj.Namespace)
-        for k, v := range obj.Labels {
-            log.Contents.Add("_label_"+k, v)
-        }
-        for k, v := range obj.Annotations {
-            log.Contents.Add("_annotations_"+k, v)
-        }
-        for i, container := range obj.Spec.Containers {
-            log.Contents.Add("container_"+strconv.FormatInt(int64(i), 10)+"_name", container.Name)
-            log.Contents.Add("container_"+strconv.FormatInt(int64(i), 10)+"_image", container.Image)
-        }
-        return log
-    }
-    return nil
-}
-
-func (m *metaCollector) processPodReplicasetLink(data *k8smeta.ObjectWrapper, method string) models.PipelineEvent {
-    if obj, ok := data.Raw.(*v1.Pod); ok {
-        log := &models.Log{}
-        log.Contents = models.NewLogContents()
-        log.Contents.Add(entityLinkSrcDomainFieldName, "k8s")
-        log.Contents.Add(entityLinkSrcEntityTypeFieldName, "pod")
-        log.Contents.Add(entityLinkSrcEntityIDFieldName, genKeyByPod(obj))
-
-        if len(obj.OwnerReferences) > 0 {
-            ownerReferences := obj.OwnerReferences[0]
-            log.Contents.Add(entityLinkDestDomainFieldName, "k8s")
-            log.Contents.Add(entityLinkDestEntityTypeFieldName, "replicaset")
-            log.Contents.Add(entityLinkDestEntityIDFieldName, genKey(obj.Namespace, ownerReferences.Kind, ownerReferences.Name))
-        } else {
-            // the link is meaningless when the data is incomplete
-            return nil
-        }
-
-        log.Contents.Add(entityLinkRelationTypeFieldName, "contain")
-
-        switch method {
-        case "create", "update":
-            log.Contents.Add(entityMethodFieldName, "update")
-        case "delete":
-            log.Contents.Add(entityMethodFieldName, method)
-        default:
-            // the link is meaningless when the data is incomplete
-            return nil
-        }
-
-        log.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(data.FirstObservedTime, 10))
-        log.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(data.LastObservedTime, 10))
-        log.Contents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval), 10))
-        log.Contents.Add(entityCategoryFieldName, defaultEntityLinkCategory)
-        log.Timestamp = uint64(time.Now().Unix())
-        return log
-    }
-    return nil
-}
-
-func (m *metaCollector) processPodServiceLink(data *k8smeta.ObjectWrapper, method string) models.PipelineEvent {
-    if obj, ok := data.Raw.(*k8smeta.PodService); ok {
-        log := &models.Log{}
-        log.Contents = models.NewLogContents()
-        log.Contents.Add(entityLinkSrcDomainFieldName, "k8s")
-        log.Contents.Add(entityLinkSrcEntityTypeFieldName, "pod")
-        log.Contents.Add(entityLinkSrcEntityIDFieldName, genKeyByPod(obj.Pod))
-
-        log.Contents.Add(entityLinkDestDomainFieldName, "k8s")
-        log.Contents.Add(entityLinkDestEntityTypeFieldName, "service")
-        log.Contents.Add(entityLinkDestEntityIDFieldName, genKeyByService(obj.Service))
-
-        log.Contents.Add(entityLinkRelationTypeFieldName, "link")
-
-        switch method {
-        case "create", "update":
-            log.Contents.Add(entityMethodFieldName, "update")
-        case "delete":
-            log.Contents.Add(entityMethodFieldName, method)
-        default:
-            // the link is meaningless when the data is incomplete
-            return nil
-        }
-
-        log.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(data.FirstObservedTime, 10))
-        log.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(data.LastObservedTime, 10))
-        log.Contents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval), 10))
-        log.Contents.Add(entityCategoryFieldName, defaultEntityLinkCategory)
-        log.Timestamp = uint64(time.Now().Unix())
-        return log
-    }
-    return nil
-}
diff --git a/plugins/input/kubernetesmetav2/meta_collector_service.go b/plugins/input/kubernetesmetav2/meta_collector_service.go
deleted file mode 100644
index 941363f09c..0000000000
--- a/plugins/input/kubernetesmetav2/meta_collector_service.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package kubernetesmetav2
-
-import (
-    "strconv"
-    "time"
-
-    v1 "k8s.io/api/core/v1"
-
-    "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
-    "github.com/alibaba/ilogtail/pkg/models"
-)
-
-func (m *metaCollector) processServiceEntity(data *k8smeta.ObjectWrapper, method string) models.PipelineEvent {
-    if obj, ok := data.Raw.(*v1.Service); ok {
-        log := &models.Log{}
-        log.Contents = models.NewLogContents()
-        log.Contents.Add(entityDomainFieldName, "k8s")
-        log.Contents.Add(entityTypeFieldName, "service")
-        log.Contents.Add(entityIDFieldName, genKeyByService(obj))
-        log.Contents.Add(entityMethodFieldName, method)
-
-        log.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(data.FirstObservedTime, 10))
-        log.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(data.LastObservedTime, 10))
-        log.Contents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval), 10))
-
-        log.Contents.Add(entityNamespaceFieldName, obj.Namespace)
-        log.Contents.Add(entityNameFieldName, obj.Name)
-        log.Contents.Add(entityCategoryFieldName, defaultEntityCategory)
-        log.Timestamp = uint64(time.Now().Unix())
-
-        // custom fields
-        log.Contents.Add("apiVersion", obj.APIVersion)
-        log.Contents.Add("kind", "service")
-        log.Contents.Add("name", obj.Name)
-        log.Contents.Add("namespace", obj.Namespace)
-        for k, v := range obj.Labels {
-            log.Contents.Add("_label_"+k, v)
-        }
-        for k, v := range obj.Annotations {
-            log.Contents.Add("_annotations_"+k, v)
-        }
-        for k, v := range obj.Spec.Selector {
-            log.Contents.Add("_spec_selector_"+k, v)
-        }
-        log.Contents.Add("_spec_type_", string(obj.Spec.Type))
-        log.Contents.Add("_cluster_ip_", obj.Spec.ClusterIP)
-        for i, port := range obj.Spec.Ports {
-            log.Contents.Add("_ports_"+strconv.FormatInt(int64(i), 10)+"_port", strconv.FormatInt(int64(port.Port), 10))
-            log.Contents.Add("_ports_"+strconv.FormatInt(int64(i), 10)+"_targetPort", port.TargetPort.StrVal)
-            log.Contents.Add("_ports_"+strconv.FormatInt(int64(i), 10)+"_protocol", string(port.Protocol))
-        }
-        return log
-    }
-    return nil
-}
diff --git a/plugins/input/kubernetesmetav2/meta_collector_service_test.go b/plugins/input/kubernetesmetav2/meta_collector_service_test.go
deleted file mode 100644
index 68b627d179..0000000000
--- a/plugins/input/kubernetesmetav2/meta_collector_service_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package kubernetesmetav2
-
-import (
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-    v1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-    "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
-)
-
-func TestProcessServiceEntity(t *testing.T) {
-    obj := &v1.Service{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "pod2",
-            Namespace: "ns2",
-            UID:       "uid2",
-        },
-        Spec: v1.ServiceSpec{
-            Ports: []v1.ServicePort{
-                {
-                    Name: "port1",
-                    Port: 80,
-                },
-                {
-                    Name: "port2",
-                    Port: 8080,
-                },
-            },
-        },
-    }
-    objWrapper := &k8smeta.ObjectWrapper{
-        Raw: obj,
-    }
-    collector := &metaCollector{
-        serviceK8sMeta: &ServiceK8sMeta{
-            Interval: 10,
-        },
-    }
-    log := collector.processServiceEntity(objWrapper, "create")
-    assert.NotNilf(t, log, "log should not be nil")
-}
diff --git a/plugins/input/kubernetesmetav2/meta_collector_storage.go b/plugins/input/kubernetesmetav2/meta_collector_storage.go
new file mode 100644
index 0000000000..bea39b5c8b
--- /dev/null
+++ b/plugins/input/kubernetesmetav2/meta_collector_storage.go
@@ -0,0 +1,28 @@
+package kubernetesmetav2
+
+import (
+    "time"
+
+    storage "k8s.io/api/storage/v1" //nolint:typecheck
+
+    "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
+    "github.com/alibaba/ilogtail/pkg/models"
+)
+
+func (m *metaCollector) processStorageClassEntity(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent {
+    if obj, ok := data.Raw.(*storage.StorageClass); ok {
+        log := &models.Log{}
+        log.Contents = models.NewLogContents()
+        log.Timestamp = uint64(time.Now().Unix())
+        m.processEntityCommonPart(log.Contents, obj.Kind, obj.Namespace, obj.Name, method, data.FirstObservedTime, data.LastObservedTime, obj.CreationTimestamp)
+
+        // custom fields
+        log.Contents.Add("api_version", obj.APIVersion)
+        log.Contents.Add("labels", m.processEntityJSONObject(obj.Labels))
+        log.Contents.Add("annotations", m.processEntityJSONObject(obj.Annotations))
+        log.Contents.Add("reclaim_policy", string(*obj.ReclaimPolicy))
+        log.Contents.Add("volume_binding_mode", string(*obj.VolumeBindingMode))
+        return []models.PipelineEvent{log}
+    }
+    return nil
+}
diff --git a/plugins/input/kubernetesmetav2/service_meta.go b/plugins/input/kubernetesmetav2/service_meta.go
index dbdf999da5..1e12b560a0 100644
--- a/plugins/input/kubernetesmetav2/service_meta.go
+++ b/plugins/input/kubernetesmetav2/service_meta.go
@@ -1,22 +1,25 @@
 package kubernetesmetav2
 
 import (
+    "github.com/alibaba/ilogtail/pkg/flags"
     "github.com/alibaba/ilogtail/pkg/helper/k8smeta"
     "github.com/alibaba/ilogtail/pkg/models"
     "github.com/alibaba/ilogtail/pkg/pipeline"
 )
 
-type ProcessFunc func(data *k8smeta.ObjectWrapper, method string) models.PipelineEvent
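+// ProcessFunc turns one watched object into zero or more pipeline events; the
+// slice return lets a single processor emit more than one event per object.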
+type ProcessFunc func(data *k8smeta.ObjectWrapper, method string) []models.PipelineEvent
 
 //revive:disable:exported
 type ServiceK8sMeta struct {
     //revive:enable:exported
     Interval int
+    Domain   string
     // entity switch
     Pod         bool
     Node        bool
     Service     bool
     Deployment  bool
+    ReplicaSet  bool
     DaemonSet   bool
     StatefulSet bool
     Configmap   bool
@@ -28,14 +31,13 @@ type ServiceK8sMeta struct {
     PersistentVolumeClaim bool
     StorageClass          bool
     Ingress               bool
-    // entity link switch
-    PodReplicasetLink bool
-    PodServiceLink    bool
+    Container bool
     // other
     metaManager   *k8smeta.MetaManager
     collector     pipeline.Collector
     metaCollector *metaCollector
     configName    string
+    clusterID     string
 }
 
 // Init called for init some system resources, like socket, mutex...
@@ -61,12 +63,11 @@ func (s *ServiceK8sMeta) Start(collector pipeline.Collector) error {
     s.collector = collector
     s.metaCollector = &metaCollector{
         serviceK8sMeta: s,
-        processors:     make(map[string][]ProcessFunc),
         collector:      collector,
-        entityTypes:    []string{},
         entityBuffer:     make(chan models.PipelineEvent, 100),
         entityLinkBuffer: make(chan models.PipelineEvent, 100),
         stopCh:           make(chan struct{}),
+        entityProcessor:  make(map[string]ProcessFunc),
     }
     return s.metaCollector.Start()
 }
 
@@ -74,7 +75,8 @@ func (s *ServiceK8sMeta) Start(collector pipeline.Collector) error {
 func init() {
     pipeline.ServiceInputs["service_kubernetes_meta"] = func() pipeline.ServiceInput {
         return &ServiceK8sMeta{
-            Interval: 30,
+            Interval:  60,
+            clusterID: *flags.ClusterID,
         }
     }
 }
diff --git a/plugins/input/prometheus/kubernetes_cluster_mode.go b/plugins/input/prometheus/kubernetes_cluster_mode.go
index c883ac8c66..7432aedaa7 100644
--- a/plugins/input/prometheus/kubernetes_cluster_mode.go
+++ b/plugins/input/prometheus/kubernetes_cluster_mode.go
@@ -15,10 +15,10 @@ package prometheus
 
 import (
+    "github.com/alibaba/ilogtail/pkg/flags"
     "github.com/alibaba/ilogtail/pkg/helper"
     "github.com/alibaba/ilogtail/pkg/logger"
     "github.com/alibaba/ilogtail/pkg/pipeline"
-    "github.com/alibaba/ilogtail/plugin_main/flags"
 
     "context"
     "errors"
diff --git a/test/engine/steps.go b/test/engine/steps.go
index a104af2651..8e5e0d59ee 100644
--- a/test/engine/steps.go
+++ b/test/engine/steps.go
@@ -38,6 +38,8 @@ func ScenarioInitializer(ctx *godog.ScenarioContext) {
     ctx.When(`^generate \{(\d+)\} apsara logs to file \{(.*)\}, with interval \{(\d+)\}ms$`, trigger.Apsara)
     ctx.When(`^generate \{(\d+)\} delimiter logs to file \{(.*)\}, with interval \{(\d+)\}ms$`, trigger.DelimiterSingle)
     ctx.When(`^query through \{(.*)\}`, control.SetQuery)
+    ctx.When(`^begin trigger`, trigger.BeginTrigger)
+    ctx.When(`^execute \{(\d+)\} commands to generate file security events on files \{(.*)\}$`, trigger.TriggerFileSecurityEvents)
 
     // Then
     ctx.Then(`^there is \{(\d+)\} logs$`, verify.LogCount)
diff --git a/test/engine/trigger/ebpf_trigger.go b/test/engine/trigger/ebpf_trigger.go
new file mode 100644
index 0000000000..6b0aaa5d87
--- /dev/null
+++ b/test/engine/trigger/ebpf_trigger.go
@@ -0,0 +1,104 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package trigger
+
+import (
+    "context"
+    "strings"
+    "text/template"
+    "time"
+
+    "github.com/alibaba/ilogtail/test/config"
+    "github.com/alibaba/ilogtail/test/engine/setup"
+)
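+// The rendered command runs a generator test on the source host; COMMAND_CNT
+// and FILE_NAME are consumed there via getEnvOrDefault.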
+const triggerFileSecurityTemplate = "cd {{.WorkDir}} && COMMAND_CNT={{.CommandCnt}} FILE_NAME={{.FileName}} {{.Command}}"
+
+func TriggerFileSecurityEvents(ctx context.Context, commandCnt int, filenames string) (context.Context, error) {
+    // brief settle time before events are generated
+    time.Sleep(5 * time.Second)
+    if err := rwFile(ctx, commandCnt, filenames); err != nil {
+        return ctx, err
+    }
+    if err := mmapFile(ctx, commandCnt, filenames); err != nil {
+        return ctx, err
+    }
+    if err := truncateFile(ctx, commandCnt, filenames); err != nil {
+        return ctx, err
+    }
+    return ctx, nil
+}
+
+func rwFile(ctx context.Context, commandCnt int, filenames string) error {
+    files := strings.Split(filenames, ",")
+    for _, file := range files {
+        touchFileCommand := "touch " + file + ";"
+        if err := setup.Env.ExecOnSource(ctx, touchFileCommand); err != nil {
+            return err
+        }
+        appendFileCommand := "echo 'Hello, World!' >> " + file + ";"
+        for i := 0; i < commandCnt; i++ {
+            if err := setup.Env.ExecOnSource(ctx, appendFileCommand); err != nil {
+                return err
+            }
+        }
+        removeCommand := "rm " + file + ";"
+        if err := setup.Env.ExecOnSource(ctx, removeCommand); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func mmapFile(ctx context.Context, commandCnt int, filenames string) error {
+    mmapFileCommand := getRunTriggerCommand("TestGenerateMmapCommand")
+    files := strings.Split(filenames, ",")
+    for _, file := range files {
+        var triggerEBPFCommand strings.Builder
+        tmpl := template.Must(template.New("trigger").Parse(triggerFileSecurityTemplate))
+        if err := tmpl.Execute(&triggerEBPFCommand, map[string]interface{}{
+            "WorkDir":    config.TestConfig.WorkDir,
+            "CommandCnt": commandCnt,
+            "FileName":   file,
+            "Command":    mmapFileCommand,
+        }); err != nil {
+            return err
+        }
+        if err := setup.Env.ExecOnSource(ctx, triggerEBPFCommand.String()); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
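+// truncateFile alternates between growing the file to 10k and cutting it back
+// to zero, so each command pair produces two distinct truncate events; an odd
+// commandCnt gets one extra grow at the end.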
+func truncateFile(ctx context.Context, commandCnt int, filenames string) error {
+    files := strings.Split(filenames, ",")
+    for _, file := range files {
+        truncateFileCommand1 := "truncate -s 10k " + file + ";"
+        truncateFileCommand2 := "truncate -s 0 " + file + ";"
+        for i := 0; i < commandCnt/2; i++ {
+            if err := setup.Env.ExecOnSource(ctx, truncateFileCommand1); err != nil {
+                return err
+            }
+            if err := setup.Env.ExecOnSource(ctx, truncateFileCommand2); err != nil {
+                return err
+            }
+        }
+        if commandCnt%2 != 0 {
+            if err := setup.Env.ExecOnSource(ctx, truncateFileCommand1); err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
diff --git a/test/engine/trigger/apsara_test.go b/test/engine/trigger/generator/apsara_test.go
similarity index 99%
rename from test/engine/trigger/apsara_test.go
rename to test/engine/trigger/generator/apsara_test.go
index 8efb323d23..f6cacf8996 100644
--- a/test/engine/trigger/apsara_test.go
+++ b/test/engine/trigger/generator/apsara_test.go
@@ -11,7 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package trigger
+package generator
 
 import (
     "fmt"
diff --git a/test/engine/trigger/delimiter_test.go b/test/engine/trigger/generator/delimiter_test.go
similarity index 99%
rename from test/engine/trigger/delimiter_test.go
rename to test/engine/trigger/generator/delimiter_test.go
index 321056886e..7388636cd1 100644
--- a/test/engine/trigger/delimiter_test.go
+++ b/test/engine/trigger/generator/delimiter_test.go
@@ -11,7 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package trigger
+package generator
 
 import (
     "fmt"
diff --git a/test/engine/trigger/generator/ebpf_file_mmap_test.go b/test/engine/trigger/generator/ebpf_file_mmap_test.go
new file mode 100644
index 0000000000..618b9af1b4
--- /dev/null
+++ b/test/engine/trigger/generator/ebpf_file_mmap_test.go
@@ -0,0 +1,51 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package generator
+
+import (
+    "os"
+    "strconv"
+    "syscall"
+    "testing"
+)
+
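+// Driven by ebpf_trigger.go through the COMMAND_CNT and FILE_NAME environment
+// variables; it repeatedly maps and unmaps the file to fire mmap security events.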
+func TestGenerateMmapCommand(t *testing.T) {
+    commandCnt := getEnvOrDefault("COMMAND_CNT", "10")
+    commandCntNum, err := strconv.Atoi(commandCnt)
+    if err != nil {
+        t.Fatalf("parse COMMAND_CNT failed: %v", err)
+        return
+    }
+    filename := getEnvOrDefault("FILE_NAME", "/tmp/ilogtail/ebpfFileSecurityHook3.log")
+    f, err := os.Create(filename)
+    if err != nil {
+        t.Fatalf("create file failed: %v", err)
+        return
+    }
+    defer f.Close()
+    fd := int(f.Fd())
+    for i := 0; i < commandCntNum; i++ {
+        b, innerErr := syscall.Mmap(fd, 0, 20, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+        if innerErr != nil {
+            t.Fatalf("mmap failed: %v", innerErr)
+        }
+        innerErr = syscall.Munmap(b)
+        if innerErr != nil {
+            t.Fatalf("munmap failed: %v", innerErr)
+        }
+    }
+    err = os.Remove(filename)
+    if err != nil {
+        t.Fatalf("remove file failed: %v", err)
+        return
+    }
+}
diff --git a/test/engine/trigger/generator/helper.go b/test/engine/trigger/generator/helper.go
new file mode 100644
index 0000000000..ed80e86727
--- /dev/null
+++ b/test/engine/trigger/generator/helper.go
@@ -0,0 +1,25 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package generator
+
+import (
+    "os"
+)
+
+func getEnvOrDefault(env, fallback string) string {
+    if value, ok := os.LookupEnv(env); ok {
+        return value
+    }
+    return fallback
+}
diff --git a/test/engine/trigger/regex_test.go b/test/engine/trigger/generator/regex_test.go
similarity index 99%
rename from test/engine/trigger/regex_test.go
rename to test/engine/trigger/generator/regex_test.go
index ff7c998068..b5950f3307 100644
--- a/test/engine/trigger/regex_test.go
+++ b/test/engine/trigger/generator/regex_test.go
@@ -11,7 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package trigger
+package generator
 
 import (
     "bytes"
diff --git a/test/engine/trigger/helper.go b/test/engine/trigger/helper.go
index 6817e07bfd..8e0fb12171 100644
--- a/test/engine/trigger/helper.go
+++ b/test/engine/trigger/helper.go
@@ -15,17 +15,9 @@ package trigger
 
 import (
     "fmt"
-    "os"
 )
 
-const commandTemplate = "/usr/local/go/bin/go test -count=1 -v -run ^%s$ github.com/alibaba/ilogtail/test/engine/trigger"
-
-func getEnvOrDefault(env, fallback string) string {
-    if value, ok := os.LookupEnv(env); ok {
-        return value
-    }
-    return fallback
-}
+const commandTemplate = "/usr/local/go/bin/go test -count=1 -v -run ^%s$ github.com/alibaba/ilogtail/test/engine/trigger/generator"
 
 func getRunTriggerCommand(triggerName string) string {
     return fmt.Sprintf(commandTemplate, triggerName)
diff --git a/test/engine/trigger/trigger.go b/test/engine/trigger/trigger.go
index 9d2fd0f81c..75b038e823 100644
--- a/test/engine/trigger/trigger.go
+++ b/test/engine/trigger/trigger.go
@@ -65,3 +65,8 @@
     }
     return context.WithValue(ctx, config.StartTimeContextKey, int32(startTime)), nil
 }
+
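+// BeginTrigger records the scenario start time in the context for steps that
+// generate events themselves instead of calling generate().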
+func BeginTrigger(ctx context.Context) (context.Context, error) {
+    startTime := time.Now().Unix()
+    return context.WithValue(ctx, config.StartTimeContextKey, int32(startTime)), nil
+}
diff --git a/test/go.mod b/test/go.mod
index 7535b25823..c6f5af3f05 100644
--- a/test/go.mod
+++ b/test/go.mod
@@ -90,8 +90,8 @@ require (
     github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
     github.com/modern-go/reflect2 v1.0.2 // indirect
     github.com/morikuni/aec v1.0.0 // indirect
-    github.com/onsi/gomega v1.19.0 // indirect
     github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+    github.com/onsi/gomega v1.19.0 // indirect
     github.com/opencontainers/go-digest v1.0.0 // indirect
     github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
     github.com/opencontainers/runc v1.1.3 // indirect
@@ -108,6 +108,7 @@ require (
     go.opencensus.io v0.24.0 // indirect
     go.opentelemetry.io/otel v1.11.2 // indirect
     go.opentelemetry.io/otel/trace v1.11.2 // indirect
+    golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6
     golang.org/x/net v0.11.0 // indirect
     golang.org/x/oauth2 v0.5.0 // indirect
     golang.org/x/sys v0.9.0 // indirect
diff --git a/test/go.sum b/test/go.sum
index 34c917a9a0..e7d5a1abe2 100644
--- a/test/go.sum
+++ b/test/go.sum
@@ -1127,6 +1127,7 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=