diff --git a/.github/workflows/build-core-ut.yaml b/.github/workflows/build-core-ut.yaml index a07999a774..16b9ed8ee1 100644 --- a/.github/workflows/build-core-ut.yaml +++ b/.github/workflows/build-core-ut.yaml @@ -82,7 +82,7 @@ jobs: run: make unittest_core - name: Unit Test Coverage - run: docker build -t unittest_coverage -f ./docker/Dockerfile_coverage . && docker run -v $(pwd):$(pwd) unittest_coverage bash -c "cd $(pwd)/core && gcovr --root . --lcov coverage.lcov --txt coverage.txt -e \".*sdk.*\" -e \".*observer.*\" -e \".*log_pb.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*fuse.*\" -e \".*go_pipeline.*\"" + run: docker build -t unittest_coverage -f ./docker/Dockerfile_coverage . && docker run -v $(pwd):$(pwd) unittest_coverage bash -c "cd $(pwd)/core && gcovr --root . --lcov coverage.lcov --txt coverage.txt -e \".*sdk.*\" -e \".*observer.*\" -e \".*protobuf.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*fuse.*\" -e \".*go_pipeline.*\"" - name: Setup Python3.10 uses: actions/setup-python@v5 diff --git a/.gitignore b/.gitignore index 7377d22c40..a3ebf3308a 100644 --- a/.gitignore +++ b/.gitignore @@ -55,7 +55,7 @@ _deps # Custom /build/ core/build/ -core/log_pb/*.pb.* +core/protobuf/sls/*.pb.* core/common/Version.cpp !/Makefile # Enterprise diff --git a/config_server/protocol/v2/README.md b/config_server/protocol/v2/README.md index 54b7bd088b..129cc14f40 100644 --- a/config_server/protocol/v2/README.md +++ b/config_server/protocol/v2/README.md @@ -22,7 +22,7 @@ string running_status = 8; // Human readable running status int64 startup_time = 9; // Required, Agent's startup time repeated ConfigInfo pipeline_configs = 10; // Information about the current PIPELINE_CONFIG held by the Agent - repeated ConfigInfo process_configs = 11; // Information about the current AGENT_CONFIG held by the Agent + repeated ConfigInfo instance_configs = 11; // Information about the current AGENT_CONFIG held by the Agent repeated CommandInfo custom_commands = 12; 
// Information about command history uint64 flags = 13; // Predefined command flag bytes opaque = 14; // Opaque data for extension @@ -74,8 +74,8 @@ UnspecifiedAgentCapability = 0; // The Agent can accept pipeline configuration from the Server. AcceptsPipelineConfig = 0x00000001; - // The Agent can accept process configuration from the Server. - AcceptsProcessConfig = 0x00000002; + // The Agent can accept instance configuration from the Server. + AcceptsInstanceConfig = 0x00000002; // The Agent can accept custom command from the Server. AcceptsCustomCommand = 0x00000004; @@ -100,7 +100,7 @@ uint64 capabilities = 3; // Bitmask of flags defined by ServerCapabilities enum repeated ConfigDetail pipeline_config_updates = 4; // Agent's pipeline config update status - repeated ConfigDetail process_config_updates = 5; // Agent's process config update status + repeated ConfigDetail instance_config_updates = 5; // Agent's instance config update status repeated CommandDetail custom_command_updates = 6; // Agent's commands updates uint64 flags = 7; // Predefined command flag bytes opaque = 8; // Opaque data for extension @@ -126,8 +126,8 @@ RembersAttribute = 0x00000001; // The Server can remember pipeline config status. RembersPipelineConfigStatus = 0x00000002; - // The Server can remember process config status. - RembersProcessConfigStatus = 0x00000004; + // The Server can remember instance config status. + RembersInstanceConfigStatus = 0x00000004; // The Server can remember custom command status. RembersCustomCommandStatus = 0x00000008; @@ -150,7 +150,7 @@ // restarted and lost state). 
ReportFullState = 0x00000001; FetchPipelineConfigDetail = 0x00000002; - FetchProcessConfigDetail = 0x00000004; + FetchInstanceConfigDetail = 0x00000004; // bits before 2^16 (inclusive) are reserved for future official fields } @@ -168,7 +168,7 @@ Server:应当通过capbilitiies上报Server自身的能力,这样如果新 Client:Agent启动后第一次向Server汇报全量信息,request字段应填尽填。request\_id、sequence\_num、capabilities、instance\_id、agent\_type、startup\_time为必填字段。 -Server:Server根据上报的信息返回响应。pipeline\_config\_updates、process\_config\_updates中包含agent需要同步的配置,updates中必然包含name和version,是否包含详情context和detail取决于server端实现。custom\_command_updates包含要求agent执行的命令command中必然包含type、name和expire\_time。 +Server:Server根据上报的信息返回响应。pipeline\_config\_updates、instance\_config\_updates中包含agent需要同步的配置,updates中必然包含name和version,是否包含详情context和detail取决于server端实现。custom\_command_updates包含要求agent执行的命令command中必然包含type、name和expire\_time。 Server是否保存Client信息取决于Server实现,如果服务端找不到或保存的sequence\_num + 1 ≠ 心跳的sequence\_num,那么就立刻返回并且flags中必须设置ReportFullStatus标识位。 @@ -192,15 +192,15 @@ Server:同注册 ### 进程配置 -若Server的注册/心跳响应中有process\_config\_updates.detail +若Server的注册/心跳响应中有instance\_config\_updates.detail Client:直接从response中获得detail,应用成功后下次心跳需要上报完整状态。 若Server的响应不包含detail -Client:根据process\_config\_updates的信息构造FetchProcessConfigRequest +Client:根据instance\_config\_updates的信息构造FetchInstanceConfigRequest -Server:返回FetchProcessConfigResponse +Server:返回FetchInstanceConfigResponse Client获取到多个进程配置时,自动合并,若产生冲突默认行为是未定义。 diff --git a/config_server/protocol/v2/agent.proto b/config_server/protocol/v2/agent.proto index ba53bfcff3..4774799c4b 100644 --- a/config_server/protocol/v2/agent.proto +++ b/config_server/protocol/v2/agent.proto @@ -49,8 +49,8 @@ enum AgentCapabilities { UnspecifiedAgentCapability = 0; // The Agent can accept pipeline configuration from the Server. AcceptsPipelineConfig = 0x00000001; - // The Agent can accept process configuration from the Server. - AcceptsProcessConfig = 0x00000002; + // The Agent can accept instance configuration from the Server. 
+ AcceptsInstanceConfig = 0x00000002; // The Agent can accept custom command from the Server. AcceptsCustomCommand = 0x00000004; @@ -81,7 +81,7 @@ message HeartbeatRequest { string running_status = 8; // Human readable running status int64 startup_time = 9; // Required, Agent's startup time repeated ConfigInfo pipeline_configs = 10; // Information about the current PIPELINE_CONFIG held by the Agent - repeated ConfigInfo process_configs = 11; // Information about the current AGENT_CONFIG held by the Agent + repeated ConfigInfo instance_configs = 11; // Information about the current AGENT_CONFIG held by the Agent repeated CommandInfo custom_commands = 12; // Information about command history uint64 flags = 13; // Predefined command flag bytes opaque = 14; // Opaque data for extension @@ -102,11 +102,6 @@ message CommandDetail { int64 expire_time = 4; // After which the command can be safely removed from history } -message ServerErrorResponse { - int32 error_code = 1; // None-zero value indicates error - string error_message = 2; // Error message -} - enum ServerCapabilities { // The capabilities field is unspecified. UnspecifiedServerCapability = 0; @@ -114,8 +109,8 @@ enum ServerCapabilities { RembersAttribute = 0x00000001; // The Server can remember pipeline config status. RembersPipelineConfigStatus = 0x00000002; - // The Server can remember process config status. - RembersProcessConfigStatus = 0x00000004; + // The Server can remember instance config status. + RembersInstanceConfigStatus = 0x00000004; // The Server can remember custom command status. RembersCustomCommandStatus = 0x00000008; @@ -133,25 +128,25 @@ enum ResponseFlags { // restarted and lost state). 
ReportFullState = 0x00000001; FetchPipelineConfigDetail = 0x00000002; - FetchProcessConfigDetail = 0x00000004; + FetchInstanceConfigDetail = 0x00000004; // bits before 2^16 (inclusive) are reserved for future official fields } // ConfigServer's response to Agent's request message HeartbeatResponse { bytes request_id = 1; - ServerErrorResponse error_response = 2; // Set value indicates error + CommonResponse commonResponse = 2; // Set common response uint64 capabilities = 3; // Bitmask of flags defined by ServerCapabilities enum repeated ConfigDetail pipeline_config_updates = 4; // Agent's pipeline config update status - repeated ConfigDetail process_config_updates = 5; // Agent's process config update status + repeated ConfigDetail instance_config_updates = 5; // Agent's instance config update status repeated CommandDetail custom_command_updates = 6; // Agent's commands updates uint64 flags = 7; // Predefined command flag bytes opaque = 8; // Opaque data for extension } // API: /Agent/FetchPipelineConfig/ -// API: /Agent/FetchProcessConfig/ +// API: /Agent/FetchInstanceConfig/ // Agent request to ConfigServer, pulling details of the config message FetchConfigRequest { bytes request_id = 1; @@ -162,6 +157,12 @@ message FetchConfigRequest { // ConfigServer response to Agent's request message FetchConfigResponse { bytes request_id = 1; - ServerErrorResponse error_response = 2; + CommonResponse commonResponse = 2; repeated ConfigDetail config_details = 3; // config detail +} + +message CommonResponse +{ + int32 status = 1; + bytes errorMessage = 2; } \ No newline at end of file diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 959df80811..72220974de 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -102,19 +102,24 @@ set(PLUGIN_SOURCE_FILES_CORE "") set(PLUGIN_SOURCE_FILES_SPL "") include(${CMAKE_CURRENT_SOURCE_DIR}/common/common.cmake) include(${CMAKE_CURRENT_SOURCE_DIR}/common/links.cmake) -include(${CMAKE_CURRENT_SOURCE_DIR}/input/input.cmake) 
-include(${CMAKE_CURRENT_SOURCE_DIR}/input/links.cmake) -include(${CMAKE_CURRENT_SOURCE_DIR}/processor/processor.cmake) -include(${CMAKE_CURRENT_SOURCE_DIR}/processor/links.cmake) -include(${CMAKE_CURRENT_SOURCE_DIR}/flusher/flusher.cmake) -include(${CMAKE_CURRENT_SOURCE_DIR}/flusher/links.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/input/input.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/input/links.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/processor/processor.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/processor/links.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/flusher/flusher.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/flusher/links.cmake) # Subdirectories (modules). except for common, input, processor, flusher, observer, helper and spl. set(SUB_DIRECTORIES_LIST - batch application app_config checkpoint compression config config/feedbacker config/provider config/watcher config_manager config_server_pb/v1 config_server_pb/v2 - container_manager controller event event_handler event_listener file_server go_pipeline log_pb logger - models monitor parser pipeline plugin plugin/creator plugin/instance plugin/interface polling - profile_sender queue reader sdk sender serializer sls_control fuse prometheus prometheus/labels prometheus/schedulers prometheus/async sink/http route ebpf/observer ebpf/security ebpf/handler ebpf runner + application app_config checkpoint container_manager logger go_pipeline monitor profile_sender models + config config/feedbacker config/provider config/watcher + pipeline pipeline/batch pipeline/compression pipeline/limiter pipeline/plugin pipeline/plugin/creator pipeline/plugin/instance pipeline/plugin/interface pipeline/queue pipeline/route pipeline/serializer + runner runner/sink/http + protobuf/config_server/v1 protobuf/config_server/v2 protobuf/sls + file_server file_server/event file_server/event_handler file_server/event_listener file_server/reader file_server/polling + prometheus 
prometheus/labels prometheus/schedulers prometheus/async + ebpf ebpf/observer ebpf/security ebpf/handler + parser sls_control sdk fuse ) if (LINUX) if (ENABLE_ENTERPRISE) @@ -158,11 +163,11 @@ list(REMOVE_ITEM FRAMEWORK_SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/go_pipeline/ if(MSVC) # remove linux event listener - file(GLOB REMOVE_EVENT_LISTENER_SOURCES event_listener/*_Linux.cpp event_listener/*_Linux.h) + file(GLOB REMOVE_EVENT_LISTENER_SOURCES file_server/event_listener/*_Linux.cpp file_server/event_listener/*_Linux.h) list(REMOVE_ITEM FRAMEWORK_SOURCE_FILES ${REMOVE_EVENT_LISTENER_SOURCES}) elseif(UNIX) # remove windows event listener - file(GLOB REMOVE_EVENT_LISTENER_SOURCES event_listener/*_Windows.cpp event_listener/*_Windows.h) + file(GLOB REMOVE_EVENT_LISTENER_SOURCES file_server/event_listener/*_Windows.cpp file_server/event_listener/*_Windows.h) list(REMOVE_ITEM FRAMEWORK_SOURCE_FILES ${REMOVE_EVENT_LISTENER_SOURCES}) if (LINUX) # observer diff --git a/core/app_config/AppConfig.cpp b/core/app_config/AppConfig.cpp index 55a9b109df..e95ef285c0 100644 --- a/core/app_config/AppConfig.cpp +++ b/core/app_config/AppConfig.cpp @@ -21,12 +21,12 @@ #include "common/JsonUtil.h" #include "common/LogtailCommonFlags.h" #include "common/RuntimeUtil.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "monitor/Monitor.h" -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif @@ -149,7 +149,9 @@ DEFINE_FLAG_INT32(max_holded_data_size, DEFINE_FLAG_INT32(pub_max_holded_data_size, "for every id and metric name, the max data size can be holded in memory (default 512KB)", 512 * 1024); -DEFINE_FLAG_STRING(metrics_report_method, "method to report metrics (default none, means logtail will not report metrics)", "sls"); 
+DEFINE_FLAG_STRING(metrics_report_method, + "method to report metrics (default none, means logtail will not report metrics)", + "sls"); DEFINE_FLAG_STRING(loong_collector_operator_service, "loong collector operator service", ""); DEFINE_FLAG_INT32(loong_collector_operator_service_port, "loong collector operator service port", 8888); @@ -1055,8 +1057,8 @@ void AppConfig::RecurseParseJsonToFlags(const Json::Value& confJson, std::string SetConfigFlag(fullName, jsonvalue.toStyledString()); } else { APSARA_LOG_INFO(sLogger, - ("Set config flag failed", "can not convert json value to flag")("flag name", fullName)( - "jsonvalue", jsonvalue.toStyledString())); + ("Set config flag failed", "can not convert json value to flag")("flag name", fullName)( + "jsonvalue", jsonvalue.toStyledString())); } } } diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h index 5553f31c74..69a129c068 100644 --- a/core/app_config/AppConfig.h +++ b/core/app_config/AppConfig.h @@ -24,7 +24,7 @@ #include #include "common/Lock.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { diff --git a/core/application/Application.cpp b/core/application/Application.cpp index 31beb0f1f1..d86fc6a92d 100644 --- a/core/application/Application.cpp +++ b/core/application/Application.cpp @@ -32,29 +32,30 @@ #include "common/version.h" #include "config/ConfigDiff.h" #include "config/watcher/ConfigWatcher.h" -#include "config_manager/ConfigManager.h" -#include "controller/EventDispatcher.h" -#include "event_handler/LogInput.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event_handler/LogInput.h" +#include "file_server/ConfigManager.h" #include "file_server/FileServer.h" -#include "flusher/sls/DiskBufferWriter.h" +#include "plugin/flusher/sls/DiskBufferWriter.h" #include "go_pipeline/LogtailPlugin.h" -#include "input/InputFeedbackInterfaceRegistry.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" #include "logger/Logger.h" 
#include "monitor/LogFileProfiler.h" #include "monitor/MetricExportor.h" #include "monitor/Monitor.h" +#include "pipeline/InstanceConfigManager.h" #include "pipeline/PipelineManager.h" -#include "pipeline/ProcessConfigManager.h" -#include "plugin/PluginRegistry.h" -#include "processor/daemon/LogProcess.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/SenderQueueManager.h" -#include "sender/FlusherRunner.h" -#include "sink/http/HttpSink.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "runner/LogProcess.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/SenderQueueManager.h" +#include "runner/FlusherRunner.h" +#include "runner/sink/http/HttpSink.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #include "config/provider/LegacyConfigProvider.h" #if defined(__linux__) && !defined(__ANDROID__) +#include "common/LinuxDaemonUtil.h" #include "shennong/ShennongManager.h" #include "streamlog/StreamLogManager.h" #endif @@ -72,6 +73,10 @@ DEFINE_FLAG_INT32(profiling_check_interval, "seconds", 60); DEFINE_FLAG_INT32(tcmalloc_release_memory_interval, "force release memory held by tcmalloc, seconds", 300); DEFINE_FLAG_INT32(exit_flushout_duration, "exit process flushout duration", 20 * 1000); DEFINE_FLAG_INT32(queue_check_gc_interval_sec, "30s", 30); +#if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) +DEFINE_FLAG_BOOL(enable_cgroup, "", true); +#endif + DECLARE_FLAG_BOOL(send_prefer_real_ip); DECLARE_FLAG_BOOL(global_network_success); @@ -157,6 +162,12 @@ void Application::Init() { GenerateInstanceId(); TryGetUUID(); +#if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) + if (BOOL_FLAG(enable_cgroup)) { + CreateCGroup(); + } +#endif + int32_t systemBootTime = AppConfig::GetInstance()->GetSystemBootTime(); LogFileProfiler::mSystemBootTime = systemBootTime > 0 ? 
systemBootTime : GetSystemBootTime(); @@ -216,15 +227,15 @@ void Application::Start() { // GCOVR_EXCL_START { // add local config dir filesystem::path localConfigPath - = filesystem::path(AppConfig::GetInstance()->GetLogtailSysConfDir()) / "processconfig" / "local"; + = filesystem::path(AppConfig::GetInstance()->GetLogtailSysConfDir()) / "instanceconfig" / "local"; error_code ec; filesystem::create_directories(localConfigPath, ec); if (ec) { LOG_WARNING(sLogger, - ("failed to create dir for local processconfig", + ("failed to create dir for local instanceconfig", "manual creation may be required")("error code", ec.value())("error msg", ec.message())); } - ConfigWatcher::GetInstance()->AddProcessSource(localConfigPath.string()); + ConfigWatcher::GetInstance()->AddInstanceSource(localConfigPath.string()); } #ifdef __ENTERPRISE__ @@ -279,9 +290,9 @@ void Application::Start() { // GCOVR_EXCL_START if (!pipelineConfigDiff.IsEmpty()) { PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff); } - ProcessConfigDiff processConfigDiff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); - if (!processConfigDiff.IsEmpty()) { - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(processConfigDiff); + InstanceConfigDiff instanceConfigDiff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); + if (!instanceConfigDiff.IsEmpty()) { + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(instanceConfigDiff); } lastConfigCheckTime = curTime; } @@ -298,6 +309,7 @@ void Application::Start() { // GCOVR_EXCL_START #endif if (curTime - lastQueueGCTime >= INT32_FLAG(queue_check_gc_interval_sec)) { ExactlyOnceQueueManager::GetInstance()->ClearTimeoutQueues(); + // this should be called in the same thread as config update SenderQueueManager::GetInstance()->ClearUnusedQueues(); lastQueueGCTime = curTime; } diff --git a/core/checkpoint/CheckPointManager.cpp b/core/checkpoint/CheckPointManager.cpp index e29d19b6be..fdec823c3b 100644 --- 
a/core/checkpoint/CheckPointManager.cpp +++ b/core/checkpoint/CheckPointManager.cpp @@ -25,7 +25,7 @@ #include "common/Flags.h" #include "common/HashUtil.h" #include "common/StringTools.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "file_server/FileDiscoveryOptions.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" diff --git a/core/checkpoint/CheckPointManager.h b/core/checkpoint/CheckPointManager.h index 21f98d174e..0d652b90d9 100644 --- a/core/checkpoint/CheckPointManager.h +++ b/core/checkpoint/CheckPointManager.h @@ -27,7 +27,7 @@ #include "common/DevInode.h" #include "common/EncodingConverter.h" #include "common/SplitedFilePath.h" -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" #ifdef APSARA_UNIT_TEST_MAIN #include "AppConfig.h" diff --git a/core/checkpoint/CheckpointManagerV2.cpp b/core/checkpoint/CheckpointManagerV2.cpp index c6e5ca32fa..a05702cb86 100644 --- a/core/checkpoint/CheckpointManagerV2.cpp +++ b/core/checkpoint/CheckpointManagerV2.cpp @@ -21,7 +21,6 @@ #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" #include "app_config/AppConfig.h" -#include "config_manager/ConfigManager.h" #include "checkpoint/CheckPointManager.h" DEFINE_FLAG_INT32(logtail_checkpoint_check_gc_interval_sec, "60 seconds", 60); diff --git a/core/checkpoint/CheckpointManagerV2.h b/core/checkpoint/CheckpointManagerV2.h index 0291d1ffd5..fe895c2a05 100644 --- a/core/checkpoint/CheckpointManagerV2.h +++ b/core/checkpoint/CheckpointManagerV2.h @@ -22,8 +22,8 @@ #include #include #include -#include "log_pb/checkpoint.pb.h" -#include "input/InputFile.h" +#include "protobuf/sls/checkpoint.pb.h" +#include "plugin/input/InputFile.h" namespace logtail { diff --git a/core/checkpoint/RangeCheckpoint.h b/core/checkpoint/RangeCheckpoint.h index 266bc08970..ad2f9cc4dd 100644 --- a/core/checkpoint/RangeCheckpoint.h +++ b/core/checkpoint/RangeCheckpoint.h @@ -18,8 +18,8 @@ #include #include 
#include -#include "log_pb/checkpoint.pb.h" -#include "queue/QueueKey.h" +#include "protobuf/sls/checkpoint.pb.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/common/CompressTools.cpp b/core/common/CompressTools.cpp index 6c480b9d1d..fc7d753ee5 100644 --- a/core/common/CompressTools.cpp +++ b/core/common/CompressTools.cpp @@ -24,7 +24,7 @@ #include -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { diff --git a/core/common/CompressTools.h b/core/common/CompressTools.h index 63abc0bd27..9293150ff9 100644 --- a/core/common/CompressTools.h +++ b/core/common/CompressTools.h @@ -17,7 +17,7 @@ #pragma once #include #include -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { diff --git a/core/common/FileSystemUtil.cpp b/core/common/FileSystemUtil.cpp index 4129c0ab14..5d59f13024 100644 --- a/core/common/FileSystemUtil.cpp +++ b/core/common/FileSystemUtil.cpp @@ -21,7 +21,6 @@ #include #endif #include - #include #include "RuntimeUtil.h" @@ -164,6 +163,20 @@ bool OverwriteFile(const std::string& fileName, const std::string& content) { return true; } +bool WriteFile(const std::string& fileName, const std::string& content, std::string& errMsg) { + ofstream f(fileName, ios::trunc); + if (!f.is_open()) { + errMsg = "failed to open file " + fileName; + return false; + } + f.write(content.c_str(), content.size()); + if (f.fail()) { + errMsg = strerror(errno); + return false; + } + return true; +} + bool IsAccessibleDirectory(const std::string& dirPath) { boost::filesystem::directory_iterator end; try { diff --git a/core/common/FileSystemUtil.h b/core/common/FileSystemUtil.h index 33880669fd..c065b3818a 100644 --- a/core/common/FileSystemUtil.h +++ b/core/common/FileSystemUtil.h @@ -15,18 +15,19 @@ */ #pragma once -#include -#include -#include #include + #include +#include +#include +#include #if defined(__linux__) #include #elif defined(_MSC_VER) #include #endif 
-#include "ErrorUtil.h" #include "DevInode.h" +#include "ErrorUtil.h" #include "LogtailCommonFlags.h" // Filesystem utility. @@ -89,6 +90,8 @@ bool ReadFileContent(const std::string& fileName, std::string& content, uint32_t // OverwriteFile overwrides @fileName with @content. bool OverwriteFile(const std::string& fileName, const std::string& content); +bool WriteFile(const std::string& fileName, const std::string& content, std::string& errMsg); + // IsAccessibleDirectory checks if the @dirPath is a existent directory and accessible. // Accessibility means that we can iterate the contents of it. bool IsAccessibleDirectory(const std::string& dirPath); @@ -251,9 +254,7 @@ namespace fsutil { int64_t GetFileSize() const; // GetMode returns st_mode. - int GetMode() const { - return static_cast(mRawStat.st_mode); - } + int GetMode() const { return static_cast(mRawStat.st_mode); } }; } // namespace fsutil diff --git a/core/common/ParamExtractor.h b/core/common/ParamExtractor.h index 4edf8098a3..7ee050e066 100644 --- a/core/common/ParamExtractor.h +++ b/core/common/ParamExtractor.h @@ -147,6 +147,58 @@ bool GetOptionalListParam(const Json::Value& config, return true; } +template +bool GetOptionalListFilterParam(const Json::Value& config, + const std::string& key, + std::vector& param, + std::string& errorMsg) { + errorMsg.clear(); + std::string currentKey = ExtractCurrentKey(key); + const Json::Value* itr = config.find(currentKey.c_str(), currentKey.c_str() + currentKey.length()); + if (itr) { + if (!itr->isArray()) { + errorMsg = "param " + key + " is not of type list"; + return false; + } + for (auto it = itr->begin(); it != itr->end(); ++it) { + if constexpr (std::is_same_v) { + if (!it->isBool()) { + errorMsg = "element in list param " + key + " is not of type bool"; + param.clear(); + return false; + } + param.emplace_back(it->asBool()); + } else if constexpr (std::is_same_v) { + if (!it->isUInt()) { + errorMsg = "element in list param " + key + " is not of type uint"; 
+ param.clear(); + return false; + } + param.emplace_back(it->asUInt()); + } else if constexpr (std::is_same_v) { + if (!it->isInt()) { + errorMsg = "element in list param " + key + " is not of type int"; + param.clear(); + return false; + } + param.emplace_back(it->asInt()); + } else if constexpr (std::is_same_v) { + if (!it->isString()) { + errorMsg = "element in list param " + key + " is not of type string"; + param.clear(); + return false; + } + param.emplace_back(it->asString()); + } else { + errorMsg = "element in list param " + key + " is not supported"; + param.clear(); + return false; + } + } + } + return true; +} + template bool GetOptionalMapParam(const Json::Value& config, const std::string& key, diff --git a/core/common/TimeUtil.h b/core/common/TimeUtil.h index d2f18f5db0..57370b333e 100644 --- a/core/common/TimeUtil.h +++ b/core/common/TimeUtil.h @@ -21,7 +21,7 @@ #include #include "common/Strptime.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "pipeline/PipelineContext.h" // Time and timestamp utility. 
diff --git a/core/common/http/AsynCurlRunner.cpp b/core/common/http/AsynCurlRunner.cpp index abb9100a0b..15ae4c7767 100644 --- a/core/common/http/AsynCurlRunner.cpp +++ b/core/common/http/AsynCurlRunner.cpp @@ -198,6 +198,11 @@ void AsynCurlRunner::HandleCompletedRequests() { LOG_WARNING(sLogger, ("failed to send request", "retry immediately")("retryCnt", request->mTryCnt)( "errMsg", curl_easy_strerror(msg->data.result))); + // free first, because mPrivateData will be reset in AddRequestToClient + if (request->mPrivateData) { + curl_slist_free_all((curl_slist*)request->mPrivateData); + request->mPrivateData = nullptr; + } AddRequestToClient(unique_ptr(request)); requestReused = true; } else { @@ -206,12 +211,12 @@ void AsynCurlRunner::HandleCompletedRequests() { break; } - if (request->mPrivateData) { - curl_slist_free_all((curl_slist*)request->mPrivateData); - } curl_multi_remove_handle(mClient, handler); curl_easy_cleanup(handler); if (!requestReused) { + if (request->mPrivateData) { + curl_slist_free_all((curl_slist*)request->mPrivateData); + } delete request; } } diff --git a/core/config/ConfigDiff.h b/core/config/ConfigDiff.h index 1512cfc7a3..76571ee22c 100644 --- a/core/config/ConfigDiff.h +++ b/core/config/ConfigDiff.h @@ -20,7 +20,7 @@ #include #include "config/PipelineConfig.h" -#include "config/ProcessConfig.h" +#include "config/InstanceConfig.h" namespace logtail { @@ -33,10 +33,10 @@ class PipelineConfigDiff { bool IsEmpty() { return mRemoved.empty() && mAdded.empty() && mModified.empty(); } }; -class ProcessConfigDiff { +class InstanceConfigDiff { public: - std::vector mAdded; - std::vector mModified; + std::vector mAdded; + std::vector mModified; std::vector mRemoved; std::vector mUnchanged; // 过渡使用,仅供插件系统用 bool IsEmpty() { return mRemoved.empty() && mAdded.empty() && mModified.empty(); } diff --git a/core/config/ProcessConfig.cpp b/core/config/InstanceConfig.cpp similarity index 91% rename from core/config/ProcessConfig.cpp rename to
core/config/InstanceConfig.cpp index 158b564186..94c89ca684 100644 --- a/core/config/ProcessConfig.cpp +++ b/core/config/InstanceConfig.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "config/ProcessConfig.h" +#include "config/InstanceConfig.h" #include @@ -22,7 +22,7 @@ #include "common/JsonUtil.h" #include "common/ParamExtractor.h" #include "common/YamlUtil.h" -#include "plugin/PluginRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" using namespace std; diff --git a/core/config/ProcessConfig.h b/core/config/InstanceConfig.h similarity index 81% rename from core/config/ProcessConfig.h rename to core/config/InstanceConfig.h index fffb60833f..82ac47533b 100644 --- a/core/config/ProcessConfig.h +++ b/core/config/InstanceConfig.h @@ -27,7 +27,7 @@ namespace logtail { -struct ProcessConfig { +struct InstanceConfig { std::string mName; std::unique_ptr mDetail; @@ -36,13 +36,13 @@ struct ProcessConfig { std::string mLogstore; std::string mRegion; - ProcessConfig(const std::string& name, std::unique_ptr&& detail) + InstanceConfig(const std::string& name, std::unique_ptr&& detail) : mName(name), mDetail(std::move(detail)) { mProject = ""; mLogstore = ""; mRegion = ""; } - ProcessConfig(const logtail::ProcessConfig& config) { + InstanceConfig(const logtail::InstanceConfig& config) { mName = config.mName; mDetail = std::make_unique(*config.mDetail); mProject = ""; @@ -50,7 +50,7 @@ struct ProcessConfig { mRegion = ""; } - ProcessConfig& operator=(ProcessConfig&& other) { + InstanceConfig& operator=(InstanceConfig&& other) { if (this != &other) { mName = std::move(other.mName); mDetail = std::move(other.mDetail); @@ -61,7 +61,7 @@ struct ProcessConfig { return *this; } - ProcessConfig& operator=(const ProcessConfig& other) { + InstanceConfig& operator=(const InstanceConfig& other) { if (this != &other) { mName = other.mName; mDetail = std::make_unique(*other.mDetail); @@ -77,11 
+77,11 @@ struct ProcessConfig { const Json::Value& GetConfig() const { return *mDetail; } }; -inline bool operator==(const ProcessConfig& lhs, const ProcessConfig& rhs) { +inline bool operator==(const InstanceConfig& lhs, const InstanceConfig& rhs) { return (lhs.mName == rhs.mName) && (*lhs.mDetail == *rhs.mDetail); } -inline bool operator!=(const ProcessConfig& lhs, const ProcessConfig& rhs) { +inline bool operator!=(const InstanceConfig& lhs, const InstanceConfig& rhs) { return !(lhs == rhs); } diff --git a/core/config/PipelineConfig.cpp b/core/config/PipelineConfig.cpp index 618c3edd7a..83f3c581b2 100644 --- a/core/config/PipelineConfig.cpp +++ b/core/config/PipelineConfig.cpp @@ -22,7 +22,7 @@ #include "common/JsonUtil.h" #include "common/ParamExtractor.h" #include "common/YamlUtil.h" -#include "plugin/PluginRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" DEFINE_FLAG_BOOL(enable_env_ref_in_config, "enable environment variable reference replacement in configuration", false); @@ -104,8 +104,7 @@ bool PipelineConfig::Parse() { string key, errorMsg; const Json::Value* itr = nullptr; LogtailAlarm& alarm = *LogtailAlarm::GetInstance(); -#ifdef __ENTERPRISE__ - // to send alarm, project, logstore and region should be extracted first. + // to send alarm and init MetricsRecord, project, logstore and region should be extracted first. 
key = "flushers"; itr = mDetail->find(key.c_str(), key.c_str() + key.size()); if (itr && itr->isArray()) { @@ -122,7 +121,6 @@ bool PipelineConfig::Parse() { } } } -#endif if (!GetOptionalUIntParam(*mDetail, "createTime", mCreateTime, errorMsg)) { PARAM_WARNING_DEFAULT(sLogger, alarm, errorMsg, mCreateTime, noModule, mName, mProject, mLogstore, mRegion); @@ -199,19 +197,19 @@ bool PipelineConfig::Parse() { mLogstore, mRegion); } - const string pluginName = it->asString(); + const string pluginType = it->asString(); if (i == 0) { - if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { mHasGoInput = true; - } else if (PluginRegistry::GetInstance()->IsValidNativeInputPlugin(pluginName)) { + } else if (PluginRegistry::GetInstance()->IsValidNativeInputPlugin(pluginType)) { mHasNativeInput = true; } else { PARAM_ERROR_RETURN( - sLogger, alarm, "unsupported input plugin", pluginName, mName, mProject, mLogstore, mRegion); + sLogger, alarm, "unsupported input plugin", pluginType, mName, mProject, mLogstore, mRegion); } } else { if (mHasGoInput) { - if (PluginRegistry::GetInstance()->IsValidNativeInputPlugin(pluginName)) { + if (PluginRegistry::GetInstance()->IsValidNativeInputPlugin(pluginType)) { PARAM_ERROR_RETURN(sLogger, alarm, "native and extended input plugins coexist", @@ -220,12 +218,12 @@ bool PipelineConfig::Parse() { mProject, mLogstore, mRegion); - } else if (!PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + } else if (!PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { PARAM_ERROR_RETURN( - sLogger, alarm, "unsupported input plugin", pluginName, mName, mProject, mLogstore, mRegion); + sLogger, alarm, "unsupported input plugin", pluginType, mName, mProject, mLogstore, mRegion); } } else { - if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { PARAM_ERROR_RETURN(sLogger, alarm, 
"native and extended input plugins coexist", @@ -234,20 +232,20 @@ bool PipelineConfig::Parse() { mProject, mLogstore, mRegion); - } else if (!PluginRegistry::GetInstance()->IsValidNativeInputPlugin(pluginName)) { + } else if (!PluginRegistry::GetInstance()->IsValidNativeInputPlugin(pluginType)) { PARAM_ERROR_RETURN( - sLogger, alarm, "unsupported input plugin", pluginName, mName, mProject, mLogstore, mRegion); + sLogger, alarm, "unsupported input plugin", pluginType, mName, mProject, mLogstore, mRegion); } } } mInputs.push_back(&plugin); // TODO: remove these special restrictions - if (pluginName == "input_observer_network") { + if (pluginType == "input_observer_network") { hasObserverInput = true; - } else if (pluginName == "input_file" || pluginName == "input_container_stdio") { + } else if (pluginType == "input_file" || pluginType == "input_container_stdio") { hasFileInput = true; #ifdef __ENTERPRISE__ - } else if (pluginName == "input_stream") { + } else if (pluginType == "input_stream") { if (!AppConfig::GetInstance()->GetOpenStreamLog()) { PARAM_ERROR_RETURN( sLogger, alarm, "stream log is not enabled", noModule, mName, mProject, mLogstore, mRegion); @@ -333,9 +331,9 @@ bool PipelineConfig::Parse() { mLogstore, mRegion); } - const string pluginName = it->asString(); + const string pluginType = it->asString(); if (mHasGoInput) { - if (PluginRegistry::GetInstance()->IsValidNativeProcessorPlugin(pluginName)) { + if (PluginRegistry::GetInstance()->IsValidNativeProcessorPlugin(pluginType)) { PARAM_ERROR_RETURN(sLogger, alarm, "native processor plugins coexist with extended input plugins", @@ -344,13 +342,13 @@ bool PipelineConfig::Parse() { mProject, mLogstore, mRegion); - } else if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + } else if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { mHasGoProcessor = true; } else { PARAM_ERROR_RETURN(sLogger, alarm, "unsupported processor plugin", - pluginName, + pluginType, mName, mProject, 
mLogstore, @@ -358,7 +356,7 @@ bool PipelineConfig::Parse() { } } else { if (isCurrentPluginNative) { - if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { // TODO: remove these special restrictions if (!hasObserverInput && !hasFileInput) { PARAM_ERROR_RETURN(sLogger, @@ -373,16 +371,16 @@ bool PipelineConfig::Parse() { } isCurrentPluginNative = false; mHasGoProcessor = true; - } else if (!PluginRegistry::GetInstance()->IsValidNativeProcessorPlugin(pluginName)) { + } else if (!PluginRegistry::GetInstance()->IsValidNativeProcessorPlugin(pluginType)) { PARAM_ERROR_RETURN(sLogger, alarm, "unsupported processor plugin", - pluginName, + pluginType, mName, mProject, mLogstore, mRegion); - } else if (pluginName == "processor_spl") { + } else if (pluginType == "processor_spl") { if (i != 0 || itr->size() != 1) { PARAM_ERROR_RETURN(sLogger, alarm, @@ -408,22 +406,22 @@ bool PipelineConfig::Parse() { mHasNativeProcessor = true; } } else { - if (PluginRegistry::GetInstance()->IsValidNativeProcessorPlugin(pluginName)) { + if (PluginRegistry::GetInstance()->IsValidNativeProcessorPlugin(pluginType)) { PARAM_ERROR_RETURN(sLogger, alarm, "native processor plugin comes after extended processor plugin", - pluginName, + pluginType, mName, mProject, mLogstore, mRegion); - } else if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + } else if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { mHasGoProcessor = true; } else { PARAM_ERROR_RETURN(sLogger, alarm, "unsupported processor plugin", - pluginName, + pluginType, mName, mProject, mLogstore, @@ -433,7 +431,7 @@ bool PipelineConfig::Parse() { } mProcessors.push_back(&plugin); if (i == 0) { - if (pluginName == "processor_parse_json_native" || pluginName == "processor_json") { + if (pluginType == "processor_parse_json_native" || pluginType == "processor_json") { mIsFirstProcessorJson = true; } } @@ -496,8 +494,8 @@ bool 
PipelineConfig::Parse() { mLogstore, mRegion); } - const string pluginName = it->asString(); - if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + const string pluginType = it->asString(); + if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { // TODO: remove these special restrictions if (mHasNativeInput && !hasFileInput && !hasObserverInput) { PARAM_ERROR_RETURN(sLogger, @@ -511,20 +509,20 @@ bool PipelineConfig::Parse() { mRegion); } mHasGoFlusher = true; - } else if (PluginRegistry::GetInstance()->IsValidNativeFlusherPlugin(pluginName)) { + } else if (PluginRegistry::GetInstance()->IsValidNativeFlusherPlugin(pluginType)) { mHasNativeFlusher = true; // TODO: remove these special restrictions ++nativeFlusherCnt; - if (pluginName == "flusher_sls") { + if (pluginType == "flusher_sls") { hasFlusherSLS = true; } } else { PARAM_ERROR_RETURN( - sLogger, alarm, "unsupported flusher plugin", pluginName, mName, mProject, mLogstore, mRegion); + sLogger, alarm, "unsupported flusher plugin", pluginType, mName, mProject, mLogstore, mRegion); } #ifdef __ENTERPRISE__ // TODO: remove these special restrictions - if (hasStreamInput && pluginName != "flusher_sls") { + if (hasStreamInput && pluginType != "flusher_sls") { PARAM_ERROR_RETURN(sLogger, alarm, "flusher plugins other than flusher_sls coexist with input_stream", @@ -640,10 +638,10 @@ bool PipelineConfig::Parse() { mLogstore, mRegion); } - const string pluginName = it->asString(); - if (!PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + const string pluginType = it->asString(); + if (!PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { PARAM_ERROR_RETURN( - sLogger, alarm, "unsupported aggregator plugin", pluginName, mName, mProject, mLogstore, mRegion); + sLogger, alarm, "unsupported aggregator plugin", pluginType, mName, mProject, mLogstore, mRegion); } mAggregators.push_back(&plugin); } @@ -706,10 +704,10 @@ bool PipelineConfig::Parse() { mLogstore, mRegion); } - 
const string pluginName = it->asString(); - if (!PluginRegistry::GetInstance()->IsValidGoPlugin(pluginName)) { + const string pluginType = it->asString(); + if (!PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { PARAM_ERROR_RETURN( - sLogger, alarm, "unsupported extension plugin", pluginName, mName, mProject, mLogstore, mRegion); + sLogger, alarm, "unsupported extension plugin", pluginType, mName, mProject, mLogstore, mRegion); } mExtensions.push_back(&plugin); } diff --git a/core/config/feedbacker/ConfigFeedbackReceiver.cpp b/core/config/feedbacker/ConfigFeedbackReceiver.cpp index 5e27bf7713..b9807ef9e4 100644 --- a/core/config/feedbacker/ConfigFeedbackReceiver.cpp +++ b/core/config/feedbacker/ConfigFeedbackReceiver.cpp @@ -31,9 +31,9 @@ void ConfigFeedbackReceiver::RegisterPipelineConfig(const std::string& name, Con mPipelineConfigFeedbackableMap[name] = feedbackable; } -void ConfigFeedbackReceiver::RegisterProcessConfig(const std::string& name, ConfigFeedbackable* feedbackable) { +void ConfigFeedbackReceiver::RegisterInstanceConfig(const std::string& name, ConfigFeedbackable* feedbackable) { std::lock_guard lock(mMutex); - mProcessConfigFeedbackableMap[name] = feedbackable; + mInstanceConfigFeedbackableMap[name] = feedbackable; } void ConfigFeedbackReceiver::RegisterCommand(const std::string& type, @@ -48,9 +48,9 @@ void ConfigFeedbackReceiver::UnregisterPipelineConfig(const std::string& name) { mPipelineConfigFeedbackableMap.erase(name); } -void ConfigFeedbackReceiver::UnregisterProcessConfig(const std::string& name) { +void ConfigFeedbackReceiver::UnregisterInstanceConfig(const std::string& name) { std::lock_guard lock(mMutex); - mProcessConfigFeedbackableMap.erase(name); + mInstanceConfigFeedbackableMap.erase(name); } void ConfigFeedbackReceiver::UnregisterCommand(const std::string& type, const std::string& name) { @@ -66,11 +66,11 @@ void ConfigFeedbackReceiver::FeedbackPipelineConfigStatus(const std::string& nam } } -void 
ConfigFeedbackReceiver::FeedbackProcessConfigStatus(const std::string& name, ConfigFeedbackStatus status) { +void ConfigFeedbackReceiver::FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) { std::lock_guard lock(mMutex); - auto iter = mProcessConfigFeedbackableMap.find(name); - if (iter != mProcessConfigFeedbackableMap.end()) { - iter->second->FeedbackProcessConfigStatus(name, status); + auto iter = mInstanceConfigFeedbackableMap.find(name); + if (iter != mInstanceConfigFeedbackableMap.end()) { + iter->second->FeedbackInstanceConfigStatus(name, status); } } diff --git a/core/config/feedbacker/ConfigFeedbackReceiver.h b/core/config/feedbacker/ConfigFeedbackReceiver.h index 47b08ca22e..86796afbcd 100644 --- a/core/config/feedbacker/ConfigFeedbackReceiver.h +++ b/core/config/feedbacker/ConfigFeedbackReceiver.h @@ -29,20 +29,20 @@ class ConfigFeedbackReceiver { public: static ConfigFeedbackReceiver& GetInstance(); void RegisterPipelineConfig(const std::string& name, ConfigFeedbackable* feedbackable); - void RegisterProcessConfig(const std::string& name, ConfigFeedbackable* feedbackable); + void RegisterInstanceConfig(const std::string& name, ConfigFeedbackable* feedbackable); void RegisterCommand(const std::string& type, const std::string& name, ConfigFeedbackable* feedbackable); void UnregisterPipelineConfig(const std::string& name); - void UnregisterProcessConfig(const std::string& name); + void UnregisterInstanceConfig(const std::string& name); void UnregisterCommand(const std::string& type, const std::string& name); void FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status); - void FeedbackProcessConfigStatus(const std::string& name, ConfigFeedbackStatus status); + void FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status); void FeedbackCommandConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status); private: ConfigFeedbackReceiver() {} 
std::mutex mMutex; std::unordered_map mPipelineConfigFeedbackableMap; - std::unordered_map mProcessConfigFeedbackableMap; + std::unordered_map mInstanceConfigFeedbackableMap; std::unordered_map mCommandFeedbackableMap; }; diff --git a/core/config/feedbacker/ConfigFeedbackable.h b/core/config/feedbacker/ConfigFeedbackable.h index b8564ac5d3..f027e2e758 100644 --- a/core/config/feedbacker/ConfigFeedbackable.h +++ b/core/config/feedbacker/ConfigFeedbackable.h @@ -29,7 +29,7 @@ class ConfigFeedbackable { public: virtual ~ConfigFeedbackable() = default; // LCOV_EXCL_LINE virtual void FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) = 0; - virtual void FeedbackProcessConfigStatus(const std::string& name, ConfigFeedbackStatus status) = 0; + virtual void FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) = 0; virtual void FeedbackCommandConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status) = 0; diff --git a/core/config/provider/CommonConfigProvider.cpp b/core/config/provider/CommonConfigProvider.cpp index d3ed4bc34c..77e67b2c5f 100644 --- a/core/config/provider/CommonConfigProvider.cpp +++ b/core/config/provider/CommonConfigProvider.cpp @@ -145,7 +145,7 @@ void CommonConfigProvider::LoadConfigFile() { mPipelineConfigInfoMap[info.name] = info; } } - for (auto const& entry : filesystem::directory_iterator(mProcessSourceDir, ec)) { + for (auto const& entry : filesystem::directory_iterator(mInstanceSourceDir, ec)) { Json::Value detail; if (LoadConfigDetailFromFile(entry, detail)) { ConfigInfo info; @@ -156,8 +156,8 @@ void CommonConfigProvider::LoadConfigFile() { } info.status = ConfigFeedbackStatus::APPLYING; info.detail = detail.toStyledString(); - lock_guard infomaplock(mProcessInfoMapMux); - mProcessConfigInfoMap[info.name] = info; + lock_guard infomaplock(mInstanceInfoMapMux); + mInstanceConfigInfoMap[info.name] = info; } } } @@ -165,7 +165,7 @@ void 
CommonConfigProvider::LoadConfigFile() { void CommonConfigProvider::CheckUpdateThread() { LOG_INFO(sLogger, (sName, "started")); usleep((rand() % 10) * 100 * 1000); - int32_t lastCheckTime = 0; + int32_t lastCheckTime = time(NULL); unique_lock lock(mThreadRunningMux); while (mIsThreadRunning) { int32_t curTime = time(NULL); @@ -247,10 +247,10 @@ void CommonConfigProvider::GetConfigUpdate() { LOG_DEBUG(sLogger, ("fetch pipelineConfig, config file number", pipelineConfig.size())); UpdateRemotePipelineConfig(pipelineConfig); } - ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail> processConfig; - if (FetchProcessConfig(heartbeatResponse, processConfig) && !processConfig.empty()) { - LOG_DEBUG(sLogger, ("fetch processConfig config, config file number", processConfig.size())); - UpdateRemoteProcessConfig(processConfig); + ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail> instanceConfig; + if (FetchInstanceConfig(heartbeatResponse, instanceConfig) && !instanceConfig.empty()) { + LOG_DEBUG(sLogger, ("fetch instanceConfig config, config file number", instanceConfig.size())); + UpdateRemoteInstanceConfig(instanceConfig); } ++mSequenceNum; } @@ -260,7 +260,7 @@ configserver::proto::v2::HeartbeatRequest CommonConfigProvider::PrepareHeartbeat string requestID = CalculateRandomUUID(); heartbeatReq.set_request_id(requestID); heartbeatReq.set_sequence_num(mSequenceNum); - heartbeatReq.set_capabilities(configserver::proto::v2::AcceptsProcessConfig + heartbeatReq.set_capabilities(configserver::proto::v2::AcceptsInstanceConfig | configserver::proto::v2::AcceptsPipelineConfig); heartbeatReq.set_instance_id(GetInstanceId()); heartbeatReq.set_agent_type("LoongCollector"); @@ -278,9 +278,9 @@ configserver::proto::v2::HeartbeatRequest CommonConfigProvider::PrepareHeartbeat for (const auto& configInfo : mPipelineConfigInfoMap) { addConfigInfoToRequest(configInfo, heartbeatReq.add_pipeline_configs()); } - lock_guard 
processinfomaplock(mProcessInfoMapMux); - for (const auto& configInfo : mProcessConfigInfoMap) { - addConfigInfoToRequest(configInfo, heartbeatReq.add_process_configs()); + lock_guard instanceinfomaplock(mInstanceInfoMapMux); + for (const auto& configInfo : mInstanceConfigInfoMap) { + addConfigInfoToRequest(configInfo, heartbeatReq.add_instance_configs()); } for (auto& configInfo : mCommandInfoMap) { @@ -373,13 +373,13 @@ bool CommonConfigProvider::FetchPipelineConfig( } } -bool CommonConfigProvider::FetchProcessConfig( +bool CommonConfigProvider::FetchInstanceConfig( configserver::proto::v2::HeartbeatResponse& heartbeatResponse, ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail>& result) { if (heartbeatResponse.flags() & ::configserver::proto::v2::FetchPipelineConfigDetail) { - return FetchProcessConfigFromServer(heartbeatResponse, result); + return FetchInstanceConfigFromServer(heartbeatResponse, result); } else { - result.Swap(heartbeatResponse.mutable_process_config_updates()); + result.Swap(heartbeatResponse.mutable_instance_config_updates()); return true; } } @@ -452,10 +452,10 @@ void CommonConfigProvider::UpdateRemotePipelineConfig( } } -void CommonConfigProvider::UpdateRemoteProcessConfig( +void CommonConfigProvider::UpdateRemoteInstanceConfig( const google::protobuf::RepeatedPtrField& configs) { error_code ec; - const std::filesystem::path& sourceDir = mProcessSourceDir; + const std::filesystem::path& sourceDir = mInstanceSourceDir; filesystem::create_directories(sourceDir, ec); if (ec) { StopUsingConfigServer(); @@ -465,56 +465,56 @@ void CommonConfigProvider::UpdateRemoteProcessConfig( return; } - lock_guard lock(mProcessMux); - lock_guard infomaplock(mProcessInfoMapMux); + lock_guard lock(mInstanceMux); + lock_guard infomaplock(mInstanceInfoMapMux); for (const auto& config : configs) { filesystem::path filePath = sourceDir / (config.name() + ".json"); if (config.version() == -1) { - 
mProcessConfigInfoMap.erase(config.name()); + mInstanceConfigInfoMap.erase(config.name()); filesystem::remove(filePath, ec); - ConfigFeedbackReceiver::GetInstance().UnregisterProcessConfig(config.name()); + ConfigFeedbackReceiver::GetInstance().UnregisterInstanceConfig(config.name()); } else { filesystem::path filePath = sourceDir / (config.name() + ".json"); if (config.version() == -1) { - mProcessConfigInfoMap.erase(config.name()); + mInstanceConfigInfoMap.erase(config.name()); filesystem::remove(filePath, ec); - ConfigFeedbackReceiver::GetInstance().UnregisterProcessConfig(config.name()); + ConfigFeedbackReceiver::GetInstance().UnregisterInstanceConfig(config.name()); } else { if (!DumpConfigFile(config, sourceDir)) { - mProcessConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(), + mInstanceConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(), .version = config.version(), .status = ConfigFeedbackStatus::FAILED, .detail = config.detail()}; continue; } - mProcessConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(), + mInstanceConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(), .version = config.version(), .status = ConfigFeedbackStatus::APPLYING, .detail = config.detail()}; - ConfigFeedbackReceiver::GetInstance().RegisterProcessConfig(config.name(), this); + ConfigFeedbackReceiver::GetInstance().RegisterInstanceConfig(config.name(), this); } } } } -bool CommonConfigProvider::FetchProcessConfigFromServer( +bool CommonConfigProvider::FetchInstanceConfigFromServer( ::configserver::proto::v2::HeartbeatResponse& heartbeatResponse, ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail>& res) { configserver::proto::v2::FetchConfigRequest fetchConfigRequest; string requestID = CalculateRandomUUID(); fetchConfigRequest.set_request_id(requestID); fetchConfigRequest.set_instance_id(GetInstanceId()); - for (const auto& config : heartbeatResponse.process_config_updates()) { + for (const auto& config : 
heartbeatResponse.instance_config_updates()) { auto reqConfig = fetchConfigRequest.add_req_configs(); reqConfig->set_name(config.name()); reqConfig->set_version(config.version()); } string operation = sdk::CONFIGSERVERAGENT; - operation.append("/FetchProcessConfig"); + operation.append("/FetchInstanceConfig"); string reqBody; fetchConfigRequest.SerializeToString(&reqBody); string fetchConfigResponse; - if (SendHttpRequest(operation, reqBody, "FetchProcessConfig", fetchConfigRequest.request_id(), fetchConfigResponse)) { + if (SendHttpRequest(operation, reqBody, "FetchInstanceConfig", fetchConfigRequest.request_id(), fetchConfigResponse)) { configserver::proto::v2::FetchConfigResponse fetchConfigResponsePb; fetchConfigResponsePb.ParseFromString(fetchConfigResponse); res.Swap(fetchConfigResponsePb.mutable_config_details()); @@ -558,14 +558,14 @@ void CommonConfigProvider::FeedbackPipelineConfigStatus(const std::string& name, LOG_DEBUG(sLogger, ("CommonConfigProvider", "FeedbackPipelineConfigStatus")("name", name)("status", ToStringView(status))); } -void CommonConfigProvider::FeedbackProcessConfigStatus(const std::string& name, ConfigFeedbackStatus status) { - lock_guard infomaplock(mProcessInfoMapMux); - auto info = mProcessConfigInfoMap.find(name); - if (info != mProcessConfigInfoMap.end()) { +void CommonConfigProvider::FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) { + lock_guard infomaplock(mInstanceInfoMapMux); + auto info = mInstanceConfigInfoMap.find(name); + if (info != mInstanceConfigInfoMap.end()) { info->second.status = status; } LOG_DEBUG(sLogger, - ("CommonConfigProvider", "FeedbackProcessConfigStatus")("name", name)("status", ToStringView(status))); + ("CommonConfigProvider", "FeedbackInstanceConfigStatus")("name", name)("status", ToStringView(status))); } void CommonConfigProvider::FeedbackCommandConfigStatus(const std::string& type, const std::string& name, diff --git a/core/config/provider/CommonConfigProvider.h 
b/core/config/provider/CommonConfigProvider.h index f8c5e8aa5d..75f20521b7 100644 --- a/core/config/provider/CommonConfigProvider.h +++ b/core/config/provider/CommonConfigProvider.h @@ -26,7 +26,7 @@ #include "config/feedbacker/ConfigFeedbackable.h" #include "config/provider/ConfigProvider.h" -#include "config_server_pb/v2/agent.pb.h" +#include "protobuf/config_server/v2/agent.pb.h" namespace logtail { @@ -62,7 +62,7 @@ class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable { void Stop() override; void FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) override; - void FeedbackProcessConfigStatus(const std::string& name, ConfigFeedbackStatus status) override; + void FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) override; void FeedbackCommandConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status) override; CommonConfigProvider() = default; @@ -73,7 +73,7 @@ class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable { virtual bool SendHeartbeat(const configserver::proto::v2::HeartbeatRequest&, configserver::proto::v2::HeartbeatResponse&); - virtual bool FetchProcessConfig(::configserver::proto::v2::HeartbeatResponse&, + virtual bool FetchInstanceConfig(::configserver::proto::v2::HeartbeatResponse&, ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail>&); virtual bool FetchPipelineConfig(::configserver::proto::v2::HeartbeatResponse&, @@ -84,10 +84,10 @@ class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable { void UpdateRemotePipelineConfig( const google::protobuf::RepeatedPtrField& configs); void - UpdateRemoteProcessConfig(const google::protobuf::RepeatedPtrField& configs); + UpdateRemoteInstanceConfig(const google::protobuf::RepeatedPtrField& configs); virtual bool - FetchProcessConfigFromServer(::configserver::proto::v2::HeartbeatResponse&, + 
FetchInstanceConfigFromServer(::configserver::proto::v2::HeartbeatResponse&, ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail>&); virtual bool FetchPipelineConfigFromServer(::configserver::proto::v2::HeartbeatResponse&, @@ -104,12 +104,12 @@ class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable { mutable std::condition_variable mStopCV; bool mConfigServerAvailable = false; - mutable std::mutex mProcessInfoMapMux; + mutable std::mutex mInstanceInfoMapMux; mutable std::mutex mPipelineInfoMapMux; mutable std::mutex mCommondInfoMapMux; std::unordered_map mPipelineConfigInfoMap; - std::unordered_map mProcessConfigInfoMap; + std::unordered_map mInstanceConfigInfoMap; std::unordered_map mCommandInfoMap; private: diff --git a/core/config/provider/ConfigProvider.cpp b/core/config/provider/ConfigProvider.cpp index dd3d864f38..44c2accbe4 100644 --- a/core/config/provider/ConfigProvider.cpp +++ b/core/config/provider/ConfigProvider.cpp @@ -27,17 +27,17 @@ void ConfigProvider::Init(const string& dir) { mPipelineSourceDir /= "config"; mPipelineSourceDir /= dir; - mProcessSourceDir.assign(AppConfig::GetInstance()->GetLogtailSysConfDir()); - mProcessSourceDir /= "processconfig"; - mProcessSourceDir /= dir; + mInstanceSourceDir.assign(AppConfig::GetInstance()->GetLogtailSysConfDir()); + mInstanceSourceDir /= "instanceconfig"; + mInstanceSourceDir /= dir; error_code ec; filesystem::create_directories(mPipelineSourceDir, ec); ConfigWatcher::GetInstance()->AddPipelineSource(mPipelineSourceDir, &mPipelineMux); ec.clear(); - filesystem::create_directories(mProcessSourceDir, ec); - ConfigWatcher::GetInstance()->AddProcessSource(mProcessSourceDir, &mProcessMux); + filesystem::create_directories(mInstanceSourceDir, ec); + ConfigWatcher::GetInstance()->AddInstanceSource(mInstanceSourceDir, &mInstanceMux); } } // namespace logtail diff --git a/core/config/provider/ConfigProvider.h b/core/config/provider/ConfigProvider.h index 
0e789d091d..ed58ac3c11 100644 --- a/core/config/provider/ConfigProvider.h +++ b/core/config/provider/ConfigProvider.h @@ -35,9 +35,9 @@ class ConfigProvider { virtual ~ConfigProvider() = default; std::filesystem::path mPipelineSourceDir; - std::filesystem::path mProcessSourceDir; + std::filesystem::path mInstanceSourceDir; mutable std::mutex mPipelineMux; - mutable std::mutex mProcessMux; + mutable std::mutex mInstanceMux; }; } // namespace logtail diff --git a/core/config/provider/LegacyCommonConfigProvider.h b/core/config/provider/LegacyCommonConfigProvider.h index fe00bc23b5..7e0498c9a5 100644 --- a/core/config/provider/LegacyCommonConfigProvider.h +++ b/core/config/provider/LegacyCommonConfigProvider.h @@ -24,7 +24,7 @@ #include #include "config/provider/ConfigProvider.h" -#include "config_server_pb/v1/agent.pb.h" +#include "protobuf/config_server/v1/agent.pb.h" namespace logtail { diff --git a/core/config/watcher/ConfigWatcher.cpp b/core/config/watcher/ConfigWatcher.cpp index e0df34b7ec..816a33a371 100644 --- a/core/config/watcher/ConfigWatcher.cpp +++ b/core/config/watcher/ConfigWatcher.cpp @@ -20,7 +20,7 @@ #include "logger/Logger.h" #include "pipeline/PipelineManager.h" -#include "pipeline/ProcessConfigManager.h" +#include "pipeline/InstanceConfigManager.h" using namespace std; @@ -29,7 +29,7 @@ namespace logtail { bool ReadFile(const string& filepath, string& content); ConfigWatcher::ConfigWatcher() - : mPipelineManager(PipelineManager::GetInstance()), mProcessConfigManager(ProcessConfigManager::GetInstance()) { + : mPipelineManager(PipelineManager::GetInstance()), mInstanceConfigManager(InstanceConfigManager::GetInstance()) { } template @@ -219,10 +219,10 @@ PipelineConfigDiff ConfigWatcher::CheckPipelineConfigDiff() { mPipelineConfigDir, mPipelineConfigDirMutexMap, mPipelineFileInfoMap, mPipelineManager, configType); } -ProcessConfigDiff ConfigWatcher::CheckProcessConfigDiff() { - const static std::string configType = "processConfig"; - return 
CheckConfigDiff( - mProcessConfigDir, mProcessConfigDirMutexMap, mProcessFileInfoMap, mProcessConfigManager, configType); +InstanceConfigDiff ConfigWatcher::CheckInstanceConfigDiff() { + const static std::string configType = "instanceConfig"; + return CheckConfigDiff( + mInstanceConfigDir, mInstanceConfigDirMutexMap, mInstanceFileInfoMap, mInstanceConfigManager, configType); } void ConfigWatcher::AddPipelineSource(const string& dir, mutex* mux) { @@ -232,10 +232,10 @@ void ConfigWatcher::AddPipelineSource(const string& dir, mutex* mux) { } } -void ConfigWatcher::AddProcessSource(const string& dir, mutex* mux) { - mProcessConfigDir.emplace_back(dir); +void ConfigWatcher::AddInstanceSource(const string& dir, mutex* mux) { + mInstanceConfigDir.emplace_back(dir); if (mux != nullptr) { - mProcessConfigDirMutexMap[dir] = mux; + mInstanceConfigDirMutexMap[dir] = mux; } } @@ -250,8 +250,8 @@ void ConfigWatcher::ClearEnvironment() { mPipelineConfigDir.clear(); mPipelineFileInfoMap.clear(); - mProcessConfigDir.clear(); - mProcessFileInfoMap.clear(); + mInstanceConfigDir.clear(); + mInstanceFileInfoMap.clear(); mCommandConfigDir.clear(); } diff --git a/core/config/watcher/ConfigWatcher.h b/core/config/watcher/ConfigWatcher.h index 8fea31ef93..f4f04f4fc0 100644 --- a/core/config/watcher/ConfigWatcher.h +++ b/core/config/watcher/ConfigWatcher.h @@ -25,12 +25,12 @@ #include "config/ConfigDiff.h" #include "config/PipelineConfig.h" -#include "config/ProcessConfig.h" +#include "config/InstanceConfig.h" namespace logtail { class PipelineManager; -class ProcessConfigManager; +class InstanceConfigManager; class ConfigWatcher { public: @@ -43,15 +43,15 @@ class ConfigWatcher { } PipelineConfigDiff CheckPipelineConfigDiff(); - ProcessConfigDiff CheckProcessConfigDiff(); + InstanceConfigDiff CheckInstanceConfigDiff(); void AddPipelineSource(const std::string& dir, std::mutex* mux = nullptr); - void AddProcessSource(const std::string& dir, std::mutex* mux = nullptr); + void 
AddInstanceSource(const std::string& dir, std::mutex* mux = nullptr); void AddCommandSource(const std::string& dir, std::mutex* mux = nullptr); // for ut void SetPipelineManager(const PipelineManager* pm) { mPipelineManager = pm; } - void SetProcessConfigManager(const ProcessConfigManager* pm) { mProcessConfigManager = pm; } + void SetInstanceConfigManager(const InstanceConfigManager* pm) { mInstanceConfigManager = pm; } void ClearEnvironment(); private: @@ -69,8 +69,8 @@ class ConfigWatcher { std::vector mPipelineConfigDir; std::unordered_map mPipelineConfigDirMutexMap; - std::vector mProcessConfigDir; - std::unordered_map mProcessConfigDirMutexMap; + std::vector mInstanceConfigDir; + std::unordered_map mInstanceConfigDirMutexMap; std::vector mCommandConfigDir; std::unordered_map mCommandConfigDirMutexMap; @@ -78,8 +78,8 @@ class ConfigWatcher { std::map> mPipelineFileInfoMap; const PipelineManager* mPipelineManager = nullptr; - std::map> mProcessFileInfoMap; - const ProcessConfigManager* mProcessConfigManager = nullptr; + std::map> mInstanceFileInfoMap; + const InstanceConfigManager* mInstanceConfigManager = nullptr; bool CheckDirectoryStatus(const std::filesystem::path& dir); }; diff --git a/core/container_manager/ContainerDiscoveryOptions.cpp b/core/container_manager/ContainerDiscoveryOptions.cpp index ce51ed5ecf..f9cdb6111a 100644 --- a/core/container_manager/ContainerDiscoveryOptions.cpp +++ b/core/container_manager/ContainerDiscoveryOptions.cpp @@ -16,6 +16,7 @@ #include "common/LogtailCommonFlags.h" #include "common/ParamExtractor.h" +#include "pipeline/Pipeline.h" using namespace std; @@ -23,7 +24,7 @@ DEFINE_FLAG_INT32(default_plugin_log_queue_size, "", 10); namespace logtail { -bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginName) { +bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginType) { string errorMsg; // K8pluginNamespaceRegex @@ -31,7 +32,7 
@@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -43,7 +44,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -55,7 +56,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -67,7 +68,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -79,7 +80,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -91,7 +92,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -103,7 +104,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -115,7 +116,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), 
ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -127,7 +128,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -137,7 +138,7 @@ bool ContainerFilters::Init(const Json::Value& config, const PipelineContext& ct return true; } -bool ContainerDiscoveryOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginName) { +bool ContainerDiscoveryOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginType) { string errorMsg; const char* key = "ContainerFilters"; @@ -147,13 +148,13 @@ bool ContainerDiscoveryOptions::Init(const Json::Value& config, const PipelineCo PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "param ContainerFilters is not of type object", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), ctx.GetRegion()); } else { - mContainerFilters.Init(*itr, ctx, pluginName); + mContainerFilters.Init(*itr, ctx, pluginType); } } @@ -162,7 +163,7 @@ bool ContainerDiscoveryOptions::Init(const Json::Value& config, const PipelineCo PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -174,7 +175,7 @@ bool ContainerDiscoveryOptions::Init(const Json::Value& config, const PipelineCo PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -187,7 +188,7 @@ bool ContainerDiscoveryOptions::Init(const Json::Value& config, const PipelineCo ctx.GetAlarm(), errorMsg, mCollectingContainersMeta, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -197,8 +198,9 @@ bool ContainerDiscoveryOptions::Init(const Json::Value& config, 
const PipelineCo return true; } -void ContainerDiscoveryOptions::GenerateContainerMetaFetchingGoPipeline( - Json::Value& res, const FileDiscoveryOptions* fileDiscovery) const { +void ContainerDiscoveryOptions::GenerateContainerMetaFetchingGoPipeline(Json::Value& res, + const FileDiscoveryOptions* fileDiscovery, + const PluginInstance::PluginMeta pluginMeta) const { Json::Value plugin(Json::objectValue); Json::Value detail(Json::objectValue); Json::Value object(Json::objectValue); @@ -248,7 +250,7 @@ void ContainerDiscoveryOptions::GenerateContainerMetaFetchingGoPipeline( if (mCollectingContainersMeta) { detail["CollectingContainersMeta"] = Json::Value(true); } - plugin["type"] = Json::Value("metric_container_info"); + plugin["type"] = Json::Value(Pipeline::GenPluginTypeWithID("metric_container_info", pluginMeta.mPluginID)); plugin["detail"] = detail; res["inputs"].append(plugin); diff --git a/core/container_manager/ContainerDiscoveryOptions.h b/core/container_manager/ContainerDiscoveryOptions.h index f3bf800cd5..633a99caa4 100644 --- a/core/container_manager/ContainerDiscoveryOptions.h +++ b/core/container_manager/ContainerDiscoveryOptions.h @@ -24,6 +24,7 @@ #include "file_server/FileDiscoveryOptions.h" #include "pipeline/PipelineContext.h" +#include "pipeline/plugin/instance/PluginInstance.h" namespace logtail { @@ -38,7 +39,7 @@ struct ContainerFilters { std::unordered_map mIncludeContainerLabel; std::unordered_map mExcludeContainerLabel; - bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginName); + bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginType); }; struct ContainerDiscoveryOptions { @@ -48,9 +49,10 @@ struct ContainerDiscoveryOptions { // 启用容器元信息预览 bool mCollectingContainersMeta = false; - bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginName); + bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& 
pluginType); void GenerateContainerMetaFetchingGoPipeline(Json::Value& res, - const FileDiscoveryOptions* fileDiscovery = nullptr) const; + const FileDiscoveryOptions* fileDiscovery = nullptr, + const PluginInstance::PluginMeta pluginMeta = {"0", "0", "0"}) const; }; using ContainerDiscoveryConfig = std::pair; diff --git a/core/dependencies.cmake b/core/dependencies.cmake index a571fd92a3..50d1aec347 100644 --- a/core/dependencies.cmake +++ b/core/dependencies.cmake @@ -128,7 +128,7 @@ macro(link_protobuf target_name) endif () endmacro() logtail_define(protobuf_BIN "Absolute path to protoc" "${DEPS_BINARY_ROOT}/protoc") -set(PROTO_FILE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/log_pb") +set(PROTO_FILE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/protobuf/sls") set(PROTO_FILES ${PROTO_FILE_PATH}/sls_logs.proto ${PROTO_FILE_PATH}/logtail_buffer_meta.proto ${PROTO_FILE_PATH}/metric.proto ${PROTO_FILE_PATH}/checkpoint.proto) execute_process(COMMAND ${protobuf_BIN} --proto_path=${PROTO_FILE_PATH} --cpp_out=${PROTO_FILE_PATH} ${PROTO_FILES}) diff --git a/core/ebpf/SourceManager.cpp b/core/ebpf/SourceManager.cpp index ca18e9cab9..4f79e1149d 100644 --- a/core/ebpf/SourceManager.cpp +++ b/core/ebpf/SourceManager.cpp @@ -132,7 +132,7 @@ bool SourceManager::DynamicLibSuccess() { return true; } -void SourceManager::FillCommonConf(nami::eBPFConfig* conf) { +void SourceManager::FillCommonConf(std::unique_ptr& conf) { conf->host_ip_ = mHostIp; conf->host_name_ = mHostName; conf->host_path_prefix_ = mHostPathPrefix; @@ -167,13 +167,13 @@ bool SourceManager::StartPlugin(nami::PluginType plugin_type, // plugin not started ... 
LOG_INFO(sLogger, ("begin to start plugin, type", int(plugin_type))); - auto conf = new nami::eBPFConfig; + auto conf = std::make_unique(); conf->plugin_type_ = plugin_type; conf->type = UpdataType::SECURE_UPDATE_TYPE_ENABLE_PROBE; conf->config_ = config; FillCommonConf(conf); #ifdef APSARA_UNIT_TEST_MAIN - mConfig = conf; + mConfig = std::move(conf); mRunning[int(plugin_type)] = true; return true; #endif @@ -183,7 +183,7 @@ bool SourceManager::StartPlugin(nami::PluginType plugin_type, return false; } auto init_f = (init_func)f; - int res = init_f(conf); + int res = init_f(conf.release()); if (!res) mRunning[int(plugin_type)] = true; return !res; } @@ -195,13 +195,13 @@ bool SourceManager::UpdatePlugin(nami::PluginType plugin_type, return false; } - auto conf = new nami::eBPFConfig; + auto conf = std::make_unique(); conf->plugin_type_ = plugin_type; conf->type = UpdataType::SECURE_UPDATE_TYPE_CONFIG_CHAGE; conf->config_ = config; FillCommonConf(conf); #ifdef APSARA_UNIT_TEST_MAIN - mConfig = conf; + mConfig = std::move(conf); return true; #endif void* f = mFuncs[(int)ebpf_func::EBPF_UPDATE]; @@ -211,7 +211,7 @@ bool SourceManager::UpdatePlugin(nami::PluginType plugin_type, } auto update_f = (update_func)f; - int res = update_f(conf); + int res = update_f(conf.release()); if (!res) mRunning[int(plugin_type)] = true; return !res; } @@ -244,11 +244,11 @@ bool SourceManager::SuspendPlugin(nami::PluginType plugin_type) { LOG_WARNING(sLogger, ("plugin not started, cannot suspend. 
type", int(plugin_type))); return false; } - auto config = new nami::eBPFConfig; + auto config = std::make_unique(); config->plugin_type_ = plugin_type; config->type = UpdataType::SECURE_UPDATE_TYPE_SUSPEND_PROBE; #ifdef APSARA_UNIT_TEST_MAIN - mConfig = config; + mConfig = std::move(config); return true; #endif // ensure that sysak would not call handle() @@ -259,7 +259,7 @@ bool SourceManager::SuspendPlugin(nami::PluginType plugin_type) { } auto suspend_f = (suspend_func)f; - int res = suspend_f(config); + int res = suspend_f(config.release()); return !res; } @@ -270,12 +270,12 @@ bool SourceManager::StopPlugin(nami::PluginType plugin_type) { return true; } - auto config = new nami::eBPFConfig; + auto config = std::make_unique(); config->plugin_type_ = plugin_type; config->type = UpdataType::SECURE_UPDATE_TYPE_DISABLE_PROBE; #ifdef APSARA_UNIT_TEST_MAIN - mConfig = config; + mConfig = std::move(config); mRunning[int(plugin_type)] = false; return true; #endif @@ -287,7 +287,7 @@ bool SourceManager::StopPlugin(nami::PluginType plugin_type) { } auto remove_f = (remove_func)f; - int res = remove_f(config); + int res = remove_f(config.release()); if (!res) mRunning[int(plugin_type)] = false; return !res; } diff --git a/core/ebpf/SourceManager.h b/core/ebpf/SourceManager.h index bb356b5d65..34e895048e 100644 --- a/core/ebpf/SourceManager.h +++ b/core/ebpf/SourceManager.h @@ -62,7 +62,7 @@ class SourceManager { ~SourceManager(); private: - void FillCommonConf(nami::eBPFConfig* conf); + void FillCommonConf(std::unique_ptr& conf); bool LoadDynamicLib(const std::string& lib_name); bool DynamicLibSuccess(); bool UpdatePlugin(nami::PluginType plugin_type, @@ -92,7 +92,7 @@ class SourceManager { std::string mFullLibName; #ifdef APSARA_UNIT_TEST_MAIN - nami::eBPFConfig* mConfig; + std::unique_ptr mConfig; friend class eBPFServerUnittest; #endif }; diff --git a/core/ebpf/config.cpp b/core/ebpf/config.cpp index fd9fb8d7d4..35c18ed749 100644 --- a/core/ebpf/config.cpp +++ 
b/core/ebpf/config.cpp @@ -39,14 +39,50 @@ DEFINE_FLAG_BOOL(ebpf_process_probe_config_enable_oom_detect, "if ebpf process p namespace logtail { namespace ebpf { -////// -bool IsProcessNamespaceFilterTypeValid(const std::string& type); +static const std::unordered_map> callNameDict + = {{SecurityProbeType::PROCESS, + {"sys_enter_execve", "sys_enter_clone", "disassociate_ctty", "acct_process", "wake_up_new_task"}}, + {SecurityProbeType::FILE, {"security_file_permission", "security_mmap_file", "security_path_truncate"}}, + {SecurityProbeType::NETWORK, {"tcp_connect", "tcp_close", "tcp_sendmsg"}}}; bool InitObserverNetworkOptionInner(const Json::Value& probeConfig, nami::ObserverNetworkOption& thisObserverNetworkOption, const PipelineContext* mContext, const std::string& sName) { std::string errorMsg; + // EnableEvent (Optional) + if (!GetOptionalBoolParam(probeConfig, "EnableLog", thisObserverNetworkOption.mEnableLog, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // EnableSpan (Optional) + if (!GetOptionalBoolParam(probeConfig, "EnableSpan", thisObserverNetworkOption.mEnableSpan, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // EnableMetric (Optional) + if (!GetOptionalBoolParam(probeConfig, "EnableMetric", thisObserverNetworkOption.mEnableMetric, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } // MeterHandlerType (Optional) if (!GetOptionalStringParam(probeConfig, "MeterHandlerType", thisObserverNetworkOption.mMeterHandlerType, errorMsg)) { 
PARAM_WARNING_IGNORE(mContext->GetLogger(), @@ -150,27 +186,67 @@ bool InitObserverNetworkOption(const Json::Value& config, return InitObserverNetworkOptionInner(probeConfig, thisObserverNetworkOption, mContext, sName); } -////// -bool InitSecurityFileFilter(const Json::Value& config, +void InitSecurityFileFilter(const Json::Value& config, nami::SecurityFileFilter& thisFileFilter, const PipelineContext* mContext, const std::string& sName) { std::string errorMsg; - for (auto& fileFilterItem : config["FilePathFilter"]) { - nami::SecurityFileFilterItem thisFileFilterItem; - // FilePath (Mandatory) - if (!GetMandatoryStringParam(fileFilterItem, "FilePath", thisFileFilterItem.mFilePath, errorMsg)) { - PARAM_ERROR_RETURN(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); + // FilePathFilter (Optional) + if (!config.isMember("FilePathFilter")) { + // No FilePathFilter, do nothing, no warning + } else if (!config["FilePathFilter"].isArray()) { + // FilePathFilter is not empty but of wrong type + errorMsg = "FilePathFilter is not of type list"; + } else if (!GetOptionalListFilterParam( + config, "FilePathFilter", thisFileFilter.mFilePathList, errorMsg)) { + // FilePathFilter has element of wrong type + } + if (!errorMsg.empty()) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } +} + +void InitSecurityNetworkFilter(const Json::Value& config, + nami::SecurityNetworkFilter& thisNetworkFilter, + const PipelineContext* mContext, + const std::string& sName) { + std::string errorMsg; + // AddrFilter (Optional) + if (!config.isMember("AddrFilter")) { + // No AddrFilter, do nothing + } else if (!config["AddrFilter"].isObject()) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + 
mContext->GetAlarm(), + "AddrFilter is not of type map", + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } else { + auto addrFilterConfig = config["AddrFilter"]; + // DestAddrList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "DestAddrList", thisNetworkFilter.mDestAddrList, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); } - // FileName (Optional) - if (!GetOptionalStringParam(fileFilterItem, "FileName", thisFileFilterItem.mFileName, errorMsg)) { + // DestPortList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "DestPortList", thisNetworkFilter.mDestPortList, errorMsg)) { PARAM_WARNING_IGNORE(mContext->GetLogger(), mContext->GetAlarm(), errorMsg, @@ -180,193 +256,128 @@ bool InitSecurityFileFilter(const Json::Value& config, mContext->GetLogstoreName(), mContext->GetRegion()); } - thisFileFilter.mFileFilterItem.emplace_back(thisFileFilterItem); - } - return true; -} - -bool InitSecurityProcessFilter(const Json::Value& config, - nami::SecurityProcessFilter& thisProcessFilter, - const PipelineContext* mContext, - const std::string& sName) { - std::string errorMsg; - // NamespaceFilter (Optional) - if (config.isMember("NamespaceFilter")) { - if (!config["NamespaceFilter"].isArray()) { + // DestAddrBlackList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "DestAddrBlackList", thisNetworkFilter.mDestAddrBlackList, errorMsg)) { PARAM_WARNING_IGNORE(mContext->GetLogger(), mContext->GetAlarm(), - "NamespaceFilter is not of type list", + errorMsg, sName, mContext->GetConfigName(), mContext->GetProjectName(), mContext->GetLogstoreName(), mContext->GetRegion()); - } else { - for (auto& namespaceFilterConfig : config["NamespaceFilter"]) { - nami::SecurityProcessNamespaceFilter 
thisProcessNamespaceFilter; - // NamespaceType (Mandatory) - if (!GetMandatoryStringParam( - namespaceFilterConfig, "NamespaceType", thisProcessNamespaceFilter.mNamespaceType, errorMsg) - || !IsProcessNamespaceFilterTypeValid(thisProcessNamespaceFilter.mNamespaceType)) { - PARAM_ERROR_RETURN(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - // ValueList (Mandatory) - if (!GetMandatoryListParam( - namespaceFilterConfig, "ValueList", thisProcessNamespaceFilter.mValueList, errorMsg)) { - PARAM_ERROR_RETURN(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - thisProcessFilter.mNamespaceFilter.emplace_back(thisProcessNamespaceFilter); - } } - } - - // NamespaceBlackFilter (Optional) - if (config.isMember("NamespaceBlackFilter")) { - // 如果用户两个filter都配置了,不去显式阻塞流水线,但是会打印警告并只执行白名单 - if (config.isMember("NamespaceFilter")) { - PARAM_WARNING_IGNORE( - mContext->GetLogger(), - mContext->GetAlarm(), - "Both NamespaceFilter and NamespaceBlackFilter are configured, only NamespaceFilter will be executed", - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } else if (!config["NamespaceBlackFilter"].isArray()) { + // DestPortBlackList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "DestPortBlackList", thisNetworkFilter.mDestPortBlackList, errorMsg)) { PARAM_WARNING_IGNORE(mContext->GetLogger(), mContext->GetAlarm(), - "NamespaceBlackFilter is not of type list", + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // SourceAddrList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "SourceAddrList", 
thisNetworkFilter.mSourceAddrList, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // SourcePortList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "SourcePortList", thisNetworkFilter.mSourcePortList, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // SourceAddrBlackList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "SourceAddrBlackList", thisNetworkFilter.mSourceAddrBlackList, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // SourcePortBlackList (Optional) + if (!GetOptionalListFilterParam( + addrFilterConfig, "SourcePortBlackList", thisNetworkFilter.mSourcePortBlackList, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, sName, mContext->GetConfigName(), mContext->GetProjectName(), mContext->GetLogstoreName(), mContext->GetRegion()); - } else { - for (auto& namespaceBlackFilterConfig : config["NamespaceBlackFilter"]) { - nami::SecurityProcessNamespaceFilter thisProcessNamespaceFilter; - // NamespaceType (Mandatory) - if (!GetMandatoryStringParam(namespaceBlackFilterConfig, - "NamespaceType", - thisProcessNamespaceFilter.mNamespaceType, - errorMsg) - || !IsProcessNamespaceFilterTypeValid(thisProcessNamespaceFilter.mNamespaceType)) { - PARAM_ERROR_RETURN(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - // ValueList 
(Mandatory) - if (!GetMandatoryListParam( - namespaceBlackFilterConfig, "ValueList", thisProcessNamespaceFilter.mValueList, errorMsg)) { - PARAM_ERROR_RETURN(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - thisProcessFilter.mNamespaceBlackFilter.emplace_back(thisProcessNamespaceFilter); - } } } - return true; } -bool InitSecurityNetworkFilter(const Json::Value& config, - nami::SecurityNetworkFilter& thisNetworkFilter, - const PipelineContext* mContext, - const std::string& sName) { - std::string errorMsg; - // DestAddrList (Optional) - if (!GetOptionalListParam(config, "DestAddrList", thisNetworkFilter.mDestAddrList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - // DestPortList (Optional) - if (!GetOptionalListParam(config, "DestPortList", thisNetworkFilter.mDestPortList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - // DestAddrBlackList (Optional) - if (!GetOptionalListParam(config, "DestAddrBlackList", thisNetworkFilter.mDestAddrBlackList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - // DestPortBlackList (Optional) - if (!GetOptionalListParam(config, "DestPortBlackList", thisNetworkFilter.mDestPortBlackList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - 
mContext->GetLogstoreName(), - mContext->GetRegion()); +void FilterValidSecurityProbeCallName(SecurityProbeType type, + std::vector& callNames, + std::string& errorMsg) { + if (type >= SecurityProbeType::MAX) { + errorMsg = "Invalid security eBPF probe type"; + return; } - // SourceAddrList (Optional) - if (!GetOptionalListParam(config, "SourceAddrList", thisNetworkFilter.mSourceAddrList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); + std::vector survivedCallNames; + bool allValid = true; + for (auto& callName : callNames) { + if (callNameDict.at(type).find(callName) == callNameDict.at(type).end()) { + if (!allValid) { + errorMsg += ", " + callName; + } else { + errorMsg = "Invalid callnames for security eBPF probe: " + callName; + allValid = false; + } + } else { + survivedCallNames.emplace_back(callName); + } } - // SourcePortList (Optional) - if (!GetOptionalListParam(config, "SourcePortList", thisNetworkFilter.mSourcePortList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); + callNames.swap(survivedCallNames); +} + +void GetSecurityProbeDefaultCallName(SecurityProbeType type, std::vector& callNames) { + callNames.assign(callNameDict.at(type).begin(), callNameDict.at(type).end()); +} + +void InitCallNameFilter(const Json::Value& config, + std::vector& callNames, + const PipelineContext* mContext, + const std::string& sName, + SecurityProbeType probeType) { + std::string errorMsg; + // CallNameFilter (Optional) + if (!config.isMember("CallNameFilter")) { + // No CallNameFilter, use default callnames, no warning + } else if (!config["CallNameFilter"].isArray()) { + // CallNameFilter is not empty but of wrong type, use 
default callnames + errorMsg = "CallNameFilter is not of type list"; + } else if (!GetOptionalListFilterParam(config, "CallNameFilter", callNames, errorMsg)) { + // CallNameFilter has element of wrong type, use default callnames + } else { + FilterValidSecurityProbeCallName(probeType, callNames, errorMsg); + // If CallNameFilter contains valid callnames, use user defined callnames, otherwise use default callnames } - // SourceAddrBlackList (Optional) - if (!GetOptionalListParam( - config, "SourceAddrBlackList", thisNetworkFilter.mSourceAddrBlackList, errorMsg)) { + if (!errorMsg.empty()) { PARAM_WARNING_IGNORE(mContext->GetLogger(), mContext->GetAlarm(), errorMsg, @@ -376,128 +387,105 @@ bool InitSecurityNetworkFilter(const Json::Value& config, mContext->GetLogstoreName(), mContext->GetRegion()); } - // SourcePortBlackList (Optional) - if (!GetOptionalListParam( - config, "SourcePortBlackList", thisNetworkFilter.mSourcePortBlackList, errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); + // Use default callnames + if (callNames.empty()) { + GetSecurityProbeDefaultCallName(probeType, callNames); } - return true; } -bool IsProcessNamespaceFilterTypeValid(const std::string& type) { - const std::unordered_set dic - = {"Uts", "Ipc", "Mnt", "Pid", "PidForChildren", "Net", "Cgroup", "User", "Time", "TimeForChildren"}; - return dic.find(type) != dic.end(); +bool CheckProbeConfigValid(const Json::Value& config, std::string& errorMsg) { + errorMsg.clear(); + if (!config.isMember("ProbeConfig")) { + // No ProbeConfig, use default, no warning + return false; + } else if (!config["ProbeConfig"].isArray()) { + // ProbeConfig is not empty but of wrong type, use default + errorMsg = "ProbeConfig is not of type list, use probe config with default filter"; + return false; + } + return true; } - -bool 
SecurityOptions::Init(SecurityFilterType filterType, +bool SecurityOptions::Init(SecurityProbeType probeType, const Json::Value& config, const PipelineContext* mContext, const std::string& sName) { std::string errorMsg; - // ProbeConfig (Mandatory) - if (!IsValidList(config, "ProbeConfig", errorMsg)) { - PARAM_ERROR_RETURN(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } - for (auto& innerConfig : config["ProbeConfig"]) { - nami::SecurityOption thisSecurityOption; - std::string errorMsg; - // CallName (Optional) - if (!GetOptionalListParam(innerConfig, "CallName", thisSecurityOption.call_names_, errorMsg)) { + // ProbeConfig (Optional) + if (!CheckProbeConfigValid(config, errorMsg)) { + if (!errorMsg.empty()) { PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); } - - // Filter - switch (filterType) { - case SecurityFilterType::FILE: { + nami::SecurityOption thisSecurityOption; + GetSecurityProbeDefaultCallName(probeType, thisSecurityOption.call_names_); + mOptionList.emplace_back(thisSecurityOption); + return true; + } + std::unordered_set thisCallNameSet; + for (auto& innerConfig : config["ProbeConfig"]) { + nami::SecurityOption thisSecurityOption; + // Genral Filter (Optional) + std::variant thisFilter; + switch (probeType) { + case SecurityProbeType::FILE: { nami::SecurityFileFilter thisFileFilter; - if (!IsValidList(innerConfig, "FilePathFilter", errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - 
mContext->GetLogstoreName(), - mContext->GetRegion()); - } else { - if (!InitSecurityFileFilter(innerConfig, thisFileFilter, mContext, sName)) { - return false; - } - } - thisSecurityOption.filter_.emplace(thisFileFilter); + InitSecurityFileFilter(innerConfig, thisFileFilter, mContext, sName); + thisFilter.emplace(thisFileFilter); break; } - case SecurityFilterType::PROCESS: { - nami::SecurityProcessFilter thisProcessFilter; - if (!InitSecurityProcessFilter(innerConfig, thisProcessFilter, mContext, sName)) { - return false; - } - thisSecurityOption.filter_.emplace(thisProcessFilter); + case SecurityProbeType::NETWORK: { + nami::SecurityNetworkFilter thisNetworkFilter; + InitSecurityNetworkFilter(innerConfig, thisNetworkFilter, mContext, sName); + thisFilter.emplace(thisNetworkFilter); break; } - case SecurityFilterType::NETWORK: { - nami::SecurityNetworkFilter thisNetworkFilter; - if (!IsValidMap(innerConfig, "AddrFilter", errorMsg)) { - PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } else { - const Json::Value& filterConfig = innerConfig["AddrFilter"]; - if (!InitSecurityNetworkFilter(filterConfig, thisNetworkFilter, mContext, sName)) { - return false; - } - } - thisSecurityOption.filter_.emplace(thisNetworkFilter); + case SecurityProbeType::PROCESS: { break; } default: PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - "Unknown filter type", - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - return false; + mContext->GetAlarm(), + "Unknown security eBPF probe type", + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + // CallNameFilter (Optional) + std::vector thisCallNames; + InitCallNameFilter(innerConfig, thisCallNames, mContext, 
sName, probeType); + // Check duplicate callnames and remove them + for (auto& callName : thisCallNames) { + if (thisCallNameSet.find(callName) == thisCallNameSet.end()) { + thisCallNameSet.insert(callName); + thisSecurityOption.call_names_.emplace_back(callName); + } else { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + "Duplicate callname " + callName + " is discarded", + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + } + // If callnames in this option are all duplicated, discard this option + if (!thisSecurityOption.call_names_.empty()) { + thisSecurityOption.filter_ = thisFilter; + mOptionList.emplace_back(thisSecurityOption); } - - - // if (!thisSecurityOption.Init(filterType, innerConfig, mContext, sName)) { - // return false; - // } - mOptionList.emplace_back(thisSecurityOption); } - mFilterType = filterType; + mProbeType = probeType; return true; } diff --git a/core/ebpf/config.h b/core/ebpf/config.h index 0b4614f603..b892433151 100644 --- a/core/ebpf/config.h +++ b/core/ebpf/config.h @@ -36,17 +36,17 @@ bool InitObserverNetworkOption(const Json::Value& config, ///////////////////// ///////////////////// -enum class SecurityFilterType { PROCESS, FILE, NETWORK }; +enum class SecurityProbeType { PROCESS, FILE, NETWORK, MAX }; class SecurityOptions { public: - bool Init(SecurityFilterType filterType, + bool Init(SecurityProbeType filterType, const Json::Value& config, const PipelineContext* mContext, const std::string& sName); std::vector mOptionList; - SecurityFilterType mFilterType; + SecurityProbeType mProbeType; }; ///////////////////// Process Level Config ///////////////////// diff --git a/core/ebpf/eBPFServer.cpp b/core/ebpf/eBPFServer.cpp index bb794edd0c..bc684eda79 100644 --- a/core/ebpf/eBPFServer.cpp +++ b/core/ebpf/eBPFServer.cpp @@ -37,6 +37,7 @@ void eBPFServer::Init() { // ebpf config auto configJson = 
AppConfig::GetInstance()->GetConfig(); mAdminConfig.LoadEbpfConfig(configJson); + mEventCB = std::make_unique(nullptr, -1, 0); #ifdef __ENTERPRISE__ mMeterCB = std::make_unique(nullptr, -1, 0); mSpanCB = std::make_unique(nullptr, -1, 0); @@ -52,9 +53,18 @@ void eBPFServer::Init() { } void eBPFServer::Stop() { + if (!mInited) return; + mInited = false; LOG_INFO(sLogger, ("begin to stop all plugins", "")); mSourceManager->StopAll(); + // destroy source manager + mSourceManager.reset(); + for (std::size_t i = 0; i < mLoadedPipeline.size(); i ++) { + UpdatePipelineName(static_cast(i), ""); + } + // UpdateContext must after than StopPlugin + if (mEventCB) mEventCB->UpdateContext(nullptr, -1, -1); if (mMeterCB) mMeterCB->UpdateContext(nullptr, -1, -1); if (mSpanCB) mSpanCB->UpdateContext(nullptr,-1, -1); if (mNetworkSecureCB) mNetworkSecureCB->UpdateContext(nullptr,-1, -1); @@ -95,11 +105,21 @@ bool eBPFServer::StartPluginInternal(const std::string& pipeline_name, uint32_t case nami::PluginType::NETWORK_OBSERVE:{ nami::NetworkObserveConfig nconfig; - nconfig.measure_cb_ = [this](auto events, auto ts) { return mMeterCB->handle(std::move(events), ts); }; - nconfig.span_cb_ = [this](auto events) { return mSpanCB->handle(std::move(events)); }; + nami::ObserverNetworkOption* opts = std::get(options); + if (opts->mEnableMetric) { + nconfig.measure_cb_ = [this](auto events, auto ts) { return mMeterCB->handle(std::move(events), ts); }; + mMeterCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); + } + if (opts->mEnableSpan) { + nconfig.span_cb_ = [this](auto events) { return mSpanCB->handle(std::move(events)); }; + mSpanCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); + } + if (opts->mEnableLog) { + nconfig.event_cb_ = [this](auto events) { return mEventCB->handle(std::move(events)); }; + mEventCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); + } + config = std::move(nconfig); - mMeterCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), 
plugin_index); - mSpanCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); ret = mSourceManager->StartPlugin(type, config); break; } @@ -135,10 +155,19 @@ bool eBPFServer::StartPluginInternal(const std::string& pipeline_name, uint32_t return ret; } +bool eBPFServer::HasRegisteredPlugins() const { + std::lock_guard lk(mMtx); + for (auto& pipeline : mLoadedPipeline) { + if (!pipeline.empty()) return true; + } + return false; +} + bool eBPFServer::EnablePlugin(const std::string& pipeline_name, uint32_t plugin_index, nami::PluginType type, const PipelineContext* ctx, const std::variant options) { + Init(); return StartPluginInternal(pipeline_name, plugin_index, type, ctx, options); } @@ -184,6 +213,7 @@ void eBPFServer::UpdateCBContext(nami::PluginType type, const logtail::PipelineC case nami::PluginType::NETWORK_OBSERVE:{ if (mMeterCB) mMeterCB->UpdateContext(ctx, key, idx); if (mSpanCB) mSpanCB->UpdateContext(ctx, key, idx); + if (mEventCB) mEventCB->UpdateContext(ctx, key, idx); return; } case nami::PluginType::NETWORK_SECURITY:{ diff --git a/core/ebpf/eBPFServer.h b/core/ebpf/eBPFServer.h index 8e5a6a219e..d1e6728eea 100644 --- a/core/ebpf/eBPFServer.h +++ b/core/ebpf/eBPFServer.h @@ -21,6 +21,7 @@ #include #include +#include "runner/InputRunner.h" #include "pipeline/PipelineContext.h" #include "ebpf/SourceManager.h" #include "ebpf/config.h" @@ -32,19 +33,19 @@ namespace logtail { namespace ebpf { -class eBPFServer { +class eBPFServer : public InputRunner { public: eBPFServer(const eBPFServer&) = delete; eBPFServer& operator=(const eBPFServer&) = delete; - void Init(); + void Init() override; static eBPFServer* GetInstance() { static eBPFServer instance; return &instance; } - void Stop(); + void Stop() override; std::string CheckLoadedPipelineName(nami::PluginType type); void UpdatePipelineName(nami::PluginType type, const std::string& name); @@ -58,6 +59,8 @@ class eBPFServer { bool SuspendPlugin(const std::string& pipeline_name, nami::PluginType 
type); + bool HasRegisteredPlugins() const override; + private: bool StartPluginInternal(const std::string& pipeline_name, uint32_t plugin_index, nami::PluginType type, @@ -70,13 +73,14 @@ class eBPFServer { std::unique_ptr mSourceManager; // source manager + std::unique_ptr mEventCB; std::unique_ptr mMeterCB; std::unique_ptr mSpanCB; std::unique_ptr mNetworkSecureCB; std::unique_ptr mProcessSecureCB; std::unique_ptr mFileSecureCB; - std::mutex mMtx; + mutable std::mutex mMtx; std::array mLoadedPipeline = {}; eBPFAdminConfig mAdminConfig; diff --git a/core/ebpf/handler/ObserveHandler.cpp b/core/ebpf/handler/ObserveHandler.cpp index 5dcc23de8c..b2bec42e57 100644 --- a/core/ebpf/handler/ObserveHandler.cpp +++ b/core/ebpf/handler/ObserveHandler.cpp @@ -24,8 +24,8 @@ #include "models/PipelineEventGroup.h" #include "models/PipelineEvent.h" #include "logger/Logger.h" -#include "queue/ProcessQueueManager.h" -#include "queue/ProcessQueueItem.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/ProcessQueueItem.h" namespace logtail { namespace ebpf { @@ -58,13 +58,13 @@ void FUNC_NAME(PipelineEventGroup& group, std::unique_ptr& measure, uin void OtelMeterHandler::handle(std::vector>&& measures, uint64_t timestamp) { if (measures.empty()) return; - for (auto& app_batch_measures : measures) { - PipelineEventGroup event_group(std::make_shared()); - for (auto& measure : app_batch_measures->measures_) { + for (auto& appBatchMeasures : measures) { + PipelineEventGroup eventGroup(std::make_shared()); + for (auto& measure : appBatchMeasures->measures_) { auto type = measure->type_; if (type == MeasureType::MEASURE_TYPE_APP) { auto inner = static_cast(measure->inner_measure_.get()); - auto event = event_group.AddMetricEvent(); + auto event = eventGroup.AddMetricEvent(); for (auto& tag : measure->tags_) { event->SetTag(tag.first, tag.second); } @@ -77,7 +77,7 @@ void OtelMeterHandler::handle(std::vector item = std::make_unique(std::move(event_group), 
mPluginIdx); + std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Otel Metrics] push queue failed!", "")); } @@ -89,12 +89,11 @@ void OtelMeterHandler::handle(std::vector>&& spans) { if (spans.empty()) return; - std::shared_ptr source_buffer = std::make_shared(); - PipelineEventGroup event_group(source_buffer); - for (auto& span : spans) { + std::shared_ptr sourceBuffer = std::make_shared(); + PipelineEventGroup eventGroup(sourceBuffer); for (auto& x : span->single_spans_) { - auto spanEvent = event_group.AddSpanEvent(); + auto spanEvent = eventGroup.AddSpanEvent(); for (auto& tag : x->tags_) { spanEvent->SetTag(tag.first, tag.second); } @@ -109,7 +108,7 @@ void OtelSpanHandler::handle(std::vector>& #ifdef APSARA_UNIT_TEST_MAIN continue; #endif - std::unique_ptr item = std::make_unique(std::move(event_group), mPluginIdx); + std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Span] push queue failed!", "")); } @@ -119,6 +118,36 @@ void OtelSpanHandler::handle(std::vector>& return; } +void EventHandler::handle(std::vector>&& events) { + if (events.empty()) return; + + for (auto& appEvents : events) { + if (!appEvents || appEvents->events_.empty()) continue; + std::shared_ptr sourceBuffer = std::make_shared(); + PipelineEventGroup eventGroup(sourceBuffer); + for (auto& event : appEvents->events_) { + if (!event || event->GetAllTags().empty()) continue; + auto logEvent = eventGroup.AddLogEvent(); + for (auto& tag : event->GetAllTags()) { + logEvent->SetContent(tag.first, tag.second); + auto seconds = std::chrono::duration_cast(std::chrono::nanoseconds(event->GetTimestamp())); + 
logEvent->SetTimestamp(seconds.count(), event->GetTimestamp() - seconds.count() * 1e9); + } + mProcessTotalCnt ++; + } + for (auto& tag : appEvents->tags_) { + eventGroup.SetTag(tag.first, tag.second); + } +#ifdef APSARA_UNIT_TEST_MAIN + continue; +#endif + std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); + if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { + LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Event] push queue failed!", "")); + } + } +} + #ifdef __ENTERPRISE__ const static std::string app_id_key = "arms.appId"; @@ -170,11 +199,11 @@ void ArmsSpanHandler::handle(std::vector>& if (spans.empty()) return; for (auto& span : spans) { - std::shared_ptr source_buffer = std::make_shared(); - PipelineEventGroup event_group(source_buffer); - event_group.SetTag(app_id_key, span->app_id_); + std::shared_ptr sourceBuffer = std::make_shared(); + PipelineEventGroup eventGroup(sourceBuffer); + eventGroup.SetTag(app_id_key, span->app_id_); for (auto& x : span->single_spans_) { - auto spanEvent = event_group.AddSpanEvent(); + auto spanEvent = eventGroup.AddSpanEvent(); for (auto& tag : x->tags_) { spanEvent->SetTag(tag.first, tag.second); } @@ -189,7 +218,7 @@ void ArmsSpanHandler::handle(std::vector>& #ifdef APSARA_UNIT_TEST_MAIN continue; #endif - std::unique_ptr item = std::make_unique(std::move(event_group), mPluginIdx); + std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Span] push queue failed!", "")); } @@ -201,30 +230,30 @@ void ArmsSpanHandler::handle(std::vector>& void ArmsMeterHandler::handle(std::vector>&& measures, uint64_t timestamp) { if (measures.empty()) return; - for (auto& app_batch_measures : measures) { - std::shared_ptr source_buffer = std::make_shared();; - 
PipelineEventGroup event_group(source_buffer); + for (auto& appBatchMeasures : measures) { + std::shared_ptr sourceBuffer = std::make_shared();; + PipelineEventGroup eventGroup(sourceBuffer); // source_ip - event_group.SetTag(std::string(app_id_key), app_batch_measures->app_id_); - event_group.SetTag(std::string(ip_key), app_batch_measures->ip_); - for (auto& measure : app_batch_measures->measures_) { + eventGroup.SetTag(std::string(app_id_key), appBatchMeasures->app_id_); + eventGroup.SetTag(std::string(ip_key), appBatchMeasures->ip_); + for (auto& measure : appBatchMeasures->measures_) { auto type = measure->type_; if (type == MeasureType::MEASURE_TYPE_APP) { - GenerateRequestsTotalMetrics(event_group, measure, timestamp); - GenerateRequestsSlowMetrics(event_group, measure, timestamp); - GenerateRequestsErrorMetrics(event_group, measure, timestamp); - GenerateRequestsDurationSumMetrics(event_group, measure, timestamp); - GenerateRequestsStatusMetrics(event_group, measure, timestamp); + GenerateRequestsTotalMetrics(eventGroup, measure, timestamp); + GenerateRequestsSlowMetrics(eventGroup, measure, timestamp); + GenerateRequestsErrorMetrics(eventGroup, measure, timestamp); + GenerateRequestsDurationSumMetrics(eventGroup, measure, timestamp); + GenerateRequestsStatusMetrics(eventGroup, measure, timestamp); } else if (type == MeasureType::MEASURE_TYPE_NET) { - GenerateTcpDropTotalMetrics(event_group, measure, timestamp); - GenerateTcpRetransTotalMetrics(event_group, measure, timestamp); - GenerateTcpConnectionTotalMetrics(event_group, measure, timestamp); - GenerateTcpRecvPktsTotalMetrics(event_group, measure, timestamp); - GenerateTcpRecvBytesTotalMetrics(event_group, measure, timestamp); - GenerateTcpSendPktsTotalMetrics(event_group, measure, timestamp); - GenerateTcpSendBytesTotalMetrics(event_group, measure, timestamp); + GenerateTcpDropTotalMetrics(eventGroup, measure, timestamp); + GenerateTcpRetransTotalMetrics(eventGroup, measure, timestamp); + 
GenerateTcpConnectionTotalMetrics(eventGroup, measure, timestamp); + GenerateTcpRecvPktsTotalMetrics(eventGroup, measure, timestamp); + GenerateTcpRecvBytesTotalMetrics(eventGroup, measure, timestamp); + GenerateTcpSendPktsTotalMetrics(eventGroup, measure, timestamp); + GenerateTcpSendBytesTotalMetrics(eventGroup, measure, timestamp); } mProcessTotalCnt++; } @@ -232,7 +261,7 @@ void ArmsMeterHandler::handle(std::vector item = std::make_unique(std::move(event_group), mPluginIdx); + std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Metrics] push queue failed!", "")); } diff --git a/core/ebpf/handler/ObserveHandler.h b/core/ebpf/handler/ObserveHandler.h index abefad706d..de5241a7e9 100644 --- a/core/ebpf/handler/ObserveHandler.h +++ b/core/ebpf/handler/ObserveHandler.h @@ -47,6 +47,12 @@ class OtelSpanHandler : public SpanHandler { void handle(std::vector>&&) override; }; +class EventHandler : public AbstractHandler { +public: + EventHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} + void handle(std::vector>&&); +}; + #ifdef __ENTERPRISE__ class ArmsMeterHandler : public MeterHandler { diff --git a/core/ebpf/handler/SecurityHandler.cpp b/core/ebpf/handler/SecurityHandler.cpp index 9f37f82b26..5121f01462 100644 --- a/core/ebpf/handler/SecurityHandler.cpp +++ b/core/ebpf/handler/SecurityHandler.cpp @@ -21,8 +21,8 @@ #include "models/PipelineEventGroup.h" #include "models/PipelineEvent.h" #include "logger/Logger.h" -#include "queue/ProcessQueueManager.h" -#include "queue/ProcessQueueItem.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/ProcessQueueItem.h" #include "common/MachineInfoUtil.h" namespace logtail { diff --git a/core/ebpf/include/export.h b/core/ebpf/include/export.h index 
a42100e445..1e18d57ad1 100644 --- a/core/ebpf/include/export.h +++ b/core/ebpf/include/export.h @@ -1,6 +1,6 @@ -/** - * used for sysak - */ +// +// Created by qianlu on 2024/6/19. +// #pragma once @@ -20,11 +20,12 @@ enum class SecureEventType { class AbstractSecurityEvent { public: - AbstractSecurityEvent(std::vector>&& tags,SecureEventType type, uint64_t ts) + AbstractSecurityEvent(std::vector>&& tags, SecureEventType type, uint64_t ts) : tags_(tags), type_(type), timestamp_(ts) {} SecureEventType GetEventType() {return type_;} std::vector> GetAllTags() { return tags_; } uint64_t GetTimestamp() { return timestamp_; } + void SetEventType(SecureEventType type) { type_ = type; } void SetTimestamp(uint64_t ts) { timestamp_ = ts; } void AppendTags(std::pair&& tag) { tags_.emplace_back(std::move(tag)); @@ -47,6 +48,9 @@ class BatchAbstractSecurityEvent { std::vector> events; }; +using HandleSingleDataEventFn = std::function&& event)>; +using HandleBatchDataEventFn = std::function>&& events)>; + enum class UpdataType { SECURE_UPDATE_TYPE_ENABLE_PROBE, SECURE_UPDATE_TYPE_CONFIG_CHAGE, @@ -63,7 +67,7 @@ enum class UpdataType { enum MeasureType {MEASURE_TYPE_APP, MEASURE_TYPE_NET, MEASURE_TYPE_PROCESS, MEASURE_TYPE_MAX}; struct AbstractSingleMeasure { - + virtual ~AbstractSingleMeasure() = default; }; struct NetSingleMeasure : public AbstractSingleMeasure { @@ -100,9 +104,9 @@ struct Measure { // process struct ApplicationBatchMeasure { std::string app_id_; + std::string region_id_; std::string ip_; std::vector> measures_; - uint64_t timestamp_; }; enum SpanKindInner { Unspecified, Internal, Server, Client, Producer, Consumer }; @@ -122,6 +126,41 @@ struct ApplicationBatchSpan { std::vector> single_spans_; }; +class SingleEvent { +public: + explicit __attribute__((visibility("default"))) SingleEvent(){} + explicit __attribute__((visibility("default"))) SingleEvent(std::vector>&& tags, uint64_t ts) + : tags_(tags), timestamp_(ts) {} + std::vector> GetAllTags() { return 
tags_; } + uint64_t GetTimestamp() { return timestamp_; } + void SetTimestamp(uint64_t ts) { timestamp_ = ts; } + void AppendTags(std::pair&& tag) { + tags_.emplace_back(std::move(tag)); + } + +private: + std::vector> tags_; + uint64_t timestamp_; +}; + +class ApplicationBatchEvent { +public: + explicit __attribute__((visibility("default"))) ApplicationBatchEvent(){} + explicit __attribute__((visibility("default"))) ApplicationBatchEvent(const std::string& app_id, std::vector>&& tags) : app_id_(app_id), tags_(tags) {} + explicit __attribute__((visibility("default"))) ApplicationBatchEvent(const std::string& app_id, std::vector>&& tags, std::vector>&& events) + : app_id_(app_id), tags_(std::move(tags)), events_(std::move(events)) {} + void SetEvents(std::vector>&& events) { events_ = std::move(events); } + void AppendEvent(std::unique_ptr&& event) { events_.emplace_back(std::move(event)); } + void AppendEvents(std::vector>&& events) { + for (auto& x : events) { + events_.emplace_back(std::move(x)); + } + } + std::string app_id_; // pid + std::vector> tags_; // container.id + std::vector> events_; +}; + /////// merged config ///////// namespace nami { @@ -138,8 +177,10 @@ enum class PluginType { // observe metrics using NamiHandleBatchMeasureFunc = std::function>&& measures, uint64_t timestamp)>; -// observe span +// observe spans using NamiHandleBatchSpanFunc = std::function>&&)>; +// observe events +using NamiHandleBatchEventFunc = std::function>&&)>; // observe security using NamiHandleBatchDataEventFn = std::function>&& events)>; @@ -148,69 +189,44 @@ struct ObserverNetworkOption { bool mDisableProtocolParse = false; bool mDisableConnStats = false; bool mEnableConnTrackerDump = false; + bool mEnableSpan = false; + bool mEnableMetric = false; + bool mEnableLog = true; std::string mMeterHandlerType; std::string mSpanHandlerType; }; // file -struct SecurityFileFilterItem { - std::string mFilePath = ""; - std::string mFileName = ""; - bool operator==(const 
SecurityFileFilterItem& other) const { - return mFilePath == other.mFilePath && mFileName == other.mFileName; - } -}; struct SecurityFileFilter { - std::vector mFileFilterItem; - bool operator==(const SecurityFileFilter& other) const { - return mFileFilterItem == other.mFileFilterItem; - } -}; - -// process -struct SecurityProcessNamespaceFilter { - // type of securityNamespaceFilter - std::string mNamespaceType = ""; - std::vector mValueList; - bool operator==(const SecurityProcessNamespaceFilter& other) const { - return mNamespaceType == other.mNamespaceType && - mValueList == other.mValueList; - } -}; -struct SecurityProcessFilter { - std::vector mNamespaceFilter; - std::vector mNamespaceBlackFilter; - bool operator==(const SecurityProcessFilter& other) const { - return mNamespaceFilter == other.mNamespaceFilter && - mNamespaceBlackFilter == other.mNamespaceBlackFilter; - } + std::vector mFilePathList; + bool operator==(const SecurityFileFilter& other) const { return mFilePathList == other.mFilePathList; } }; // network struct SecurityNetworkFilter { - std::vector mDestAddrList; - std::vector mDestPortList; - std::vector mDestAddrBlackList; - std::vector mDestPortBlackList; - std::vector mSourceAddrList; - std::vector mSourcePortList; - std::vector mSourceAddrBlackList; - std::vector mSourcePortBlackList; - bool operator==(const SecurityNetworkFilter& other) const { - return mDestAddrList == other.mDestAddrList && - mDestPortList == other.mDestPortList && - mDestAddrBlackList == other.mDestAddrBlackList && - mDestPortBlackList == other.mDestPortBlackList && - mSourceAddrList == other.mSourceAddrList && - mSourcePortList == other.mSourcePortList && - mSourceAddrBlackList == other.mSourceAddrBlackList && - mSourcePortBlackList == other.mSourcePortBlackList; - } + std::vector mDestAddrList; + std::vector mDestPortList; + std::vector mDestAddrBlackList; + std::vector mDestPortBlackList; + std::vector mSourceAddrList; + std::vector mSourcePortList; + std::vector 
mSourceAddrBlackList; + std::vector mSourcePortBlackList; + bool operator==(const SecurityNetworkFilter& other) const { + return mDestAddrList == other.mDestAddrList && + mDestPortList == other.mDestPortList && + mDestAddrBlackList == other.mDestAddrBlackList && + mDestPortBlackList == other.mDestPortBlackList && + mSourceAddrList == other.mSourceAddrList && + mSourcePortList == other.mSourcePortList && + mSourceAddrBlackList == other.mSourceAddrBlackList && + mSourcePortBlackList == other.mSourcePortBlackList; + } }; struct SecurityOption { std::vector call_names_; - std::variant filter_; + std::variant filter_; bool operator==(const SecurityOption& other) const { return call_names_ == other.call_names_ && filter_ == other.filter_; @@ -228,8 +244,12 @@ struct NetworkObserveConfig { long upca_offset_; long upps_offset_; long upcr_offset_; - NamiHandleBatchMeasureFunc measure_cb_; - NamiHandleBatchSpanFunc span_cb_; + bool enable_span_ = false; + bool enable_metric_ = false; + bool enable_event_ = false; + NamiHandleBatchMeasureFunc measure_cb_ = nullptr; + NamiHandleBatchSpanFunc span_cb_ = nullptr; + NamiHandleBatchEventFunc event_cb_ = nullptr; bool operator==(const NetworkObserveConfig& other) const { return enable_libbpf_debug_ == other.enable_libbpf_debug_ && enable_so_ == other.enable_so_ && @@ -290,4 +310,4 @@ struct eBPFConfig { } }; -}; \ No newline at end of file +}; diff --git a/core/file_server/AdhocFileManager.h b/core/file_server/AdhocFileManager.h index a11dc6a03e..5e4ec943df 100644 --- a/core/file_server/AdhocFileManager.h +++ b/core/file_server/AdhocFileManager.h @@ -18,7 +18,7 @@ #include #include #include "checkpoint/AdhocCheckpointManager.h" -#include "event/Event.h" +#include "file_server/event/Event.h" namespace logtail { diff --git a/core/config_manager/ConfigManager.cpp b/core/file_server/ConfigManager.cpp similarity index 99% rename from core/config_manager/ConfigManager.cpp rename to core/file_server/ConfigManager.cpp index 
f3184eeedb..00161105e1 100644 --- a/core/config_manager/ConfigManager.cpp +++ b/core/file_server/ConfigManager.cpp @@ -46,14 +46,13 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/version.h" -#include "controller/EventDispatcher.h" -#include "event_handler/EventHandler.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event_handler/EventHandler.h" #include "file_server/FileServer.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineManager.h" -#include "processor/daemon/LogProcess.h" using namespace std; diff --git a/core/config_manager/ConfigManager.h b/core/file_server/ConfigManager.h similarity index 99% rename from core/config_manager/ConfigManager.h rename to core/file_server/ConfigManager.h index 3b3ec203c4..71020a467f 100644 --- a/core/config_manager/ConfigManager.h +++ b/core/file_server/ConfigManager.h @@ -24,7 +24,7 @@ #include "common/Lock.h" #include "container_manager/ConfigContainerInfoUpdateCmd.h" -#include "event/Event.h" +#include "file_server/event/Event.h" #include "file_server/FileDiscoveryOptions.h" namespace logtail { diff --git a/core/file_server/ContainerInfo.h b/core/file_server/ContainerInfo.h index 8d3e4afcf8..0f08db7999 100644 --- a/core/file_server/ContainerInfo.h +++ b/core/file_server/ContainerInfo.h @@ -24,7 +24,7 @@ #include #include "container_manager/ConfigContainerInfoUpdateCmd.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { diff --git a/core/controller/EventDispatcher.cpp b/core/file_server/EventDispatcher.cpp similarity index 98% rename from core/controller/EventDispatcher.cpp rename to core/file_server/EventDispatcher.cpp index 30449f1602..e9d3abab82 100644 --- a/core/controller/EventDispatcher.cpp +++ b/core/file_server/EventDispatcher.cpp @@ -39,28 +39,27 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/version.h" 
-#include "event/Event.h" -#include "event_handler/EventHandler.h" -#include "event_handler/LogInput.h" -#include "log_pb/metric.pb.h" -#include "log_pb/sls_logs.pb.h" +#include "file_server/event/Event.h" +#include "file_server/event_handler/EventHandler.h" +#include "file_server/event_handler/LogInput.h" +#include "protobuf/sls/metric.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "monitor/MetricExportor.h" -#include "polling/PollingDirFile.h" -#include "polling/PollingModify.h" -#include "processor/daemon/LogProcess.h" +#include "file_server/polling/PollingDirFile.h" +#include "file_server/polling/PollingModify.h" #ifdef APSARA_UNIT_TEST_MAIN -#include "polling/PollingEventQueue.h" +#include "file_server/polling/PollingEventQueue.h" #endif #include "application/Application.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" +#include "file_server/FileServer.h" #include "go_pipeline/LogtailPlugin.h" +#include "plugin/input/InputContainerStdio.h" +#include "plugin/input/InputFile.h" #include "pipeline/PipelineManager.h" -#include "plugin/PluginRegistry.h" -#include "file_server/FileServer.h" -#include "input/InputContainerStdio.h" -#include "input/InputFile.h" +#include "pipeline/plugin/PluginRegistry.h" using namespace std; using namespace sls_logs; diff --git a/core/controller/EventDispatcher.h b/core/file_server/EventDispatcher.h similarity index 98% rename from core/controller/EventDispatcher.h rename to core/file_server/EventDispatcher.h index 6511cc9cfe..d3162720a4 100644 --- a/core/controller/EventDispatcher.h +++ b/core/file_server/EventDispatcher.h @@ -30,9 +30,9 @@ #include #include #include "monitor/LogFileProfiler.h" -#include "polling/PollingModify.h" -#include "polling/PollingDirFile.h" -#include "event_listener/EventListener.h" +#include "file_server/polling/PollingModify.h" +#include "file_server/polling/PollingDirFile.h" +#include 
"file_server/event_listener/EventListener.h" #include "checkpoint/CheckPointManager.h" #include "file_server/FileDiscoveryOptions.h" namespace logtail { diff --git a/core/file_server/FileDiscoveryOptions.cpp b/core/file_server/FileDiscoveryOptions.cpp index 576f199e29..7aeda2786d 100644 --- a/core/file_server/FileDiscoveryOptions.cpp +++ b/core/file_server/FileDiscoveryOptions.cpp @@ -131,7 +131,7 @@ bool FileDiscoveryOptions::CompareByDepthAndCreateTime( return false; } -bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginName) { +bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginType) { string errorMsg; // FilePaths + MaxDirSearchDepth @@ -139,7 +139,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_ERROR_RETURN(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -149,7 +149,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_ERROR_RETURN(ctx.GetLogger(), ctx.GetAlarm(), "list param FilePaths has more than 1 element", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -161,7 +161,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_ERROR_RETURN(ctx.GetLogger(), ctx.GetAlarm(), "string param FilePaths[0] is invalid", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -186,7 +186,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext ctx.GetAlarm(), errorMsg, mMaxDirSearchDepth, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -201,7 +201,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext ctx.GetAlarm(), errorMsg, mPreservedDirDepth, - pluginName, 
+ pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -213,7 +213,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -224,7 +224,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "string param ExcludeFilePaths[" + ToString(i) + "] is not absolute", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -245,7 +245,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -256,7 +256,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "string param ExcludeFiles[" + ToString(i) + "] contains path separator", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -272,7 +272,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -283,7 +283,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "string param ExcludeDirs[" + ToString(i) + "] is not absolute", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -316,7 +316,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext ctx.GetAlarm(), errorMsg, mAllowingCollectingFilesInRootDir, - pluginName, + 
pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -331,7 +331,7 @@ bool FileDiscoveryOptions::Init(const Json::Value& config, const PipelineContext ctx.GetAlarm(), errorMsg, mAllowingIncludedByMultiConfigs, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), diff --git a/core/file_server/FileDiscoveryOptions.h b/core/file_server/FileDiscoveryOptions.h index de2f21f6fc..ffe17b6e19 100644 --- a/core/file_server/FileDiscoveryOptions.h +++ b/core/file_server/FileDiscoveryOptions.h @@ -36,7 +36,7 @@ class FileDiscoveryOptions { static bool CompareByDepthAndCreateTime(std::pair left, std::pair right); - bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginName); + bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginType); const std::string& GetBasePath() const { return mBasePath; } const std::string& GetFilePattern() const { return mFilePattern; } const std::vector& GetWildcardPaths() const { return mWildcardPaths; } diff --git a/core/file_server/FileServer.cpp b/core/file_server/FileServer.cpp index ef9c17a350..49b1953b72 100644 --- a/core/file_server/FileServer.cpp +++ b/core/file_server/FileServer.cpp @@ -18,12 +18,12 @@ #include "common/Flags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" -#include "controller/EventDispatcher.h" -#include "event_handler/LogInput.h" -#include "input/InputFile.h" -#include "polling/PollingDirFile.h" -#include "polling/PollingModify.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event_handler/LogInput.h" +#include "file_server/ConfigManager.h" +#include "plugin/input/InputFile.h" +#include "file_server/polling/PollingDirFile.h" +#include "file_server/polling/PollingModify.h" DEFINE_FLAG_BOOL(enable_polling_discovery, "", true); diff --git a/core/file_server/FileServer.h b/core/file_server/FileServer.h index 
718b45db3f..7daee73fc5 100644 --- a/core/file_server/FileServer.h +++ b/core/file_server/FileServer.h @@ -25,7 +25,7 @@ #include "file_server/MultilineOptions.h" #include "monitor/PluginMetricManager.h" #include "pipeline/PipelineContext.h" -#include "reader/FileReaderOptions.h" +#include "file_server/reader/FileReaderOptions.h" namespace logtail { diff --git a/core/file_server/MultilineOptions.cpp b/core/file_server/MultilineOptions.cpp index 3b4df415e5..c31376850e 100644 --- a/core/file_server/MultilineOptions.cpp +++ b/core/file_server/MultilineOptions.cpp @@ -19,7 +19,7 @@ using namespace std; namespace logtail { -bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginName) { +bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginType) { string errorMsg; // Mode @@ -29,7 +29,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct ctx.GetAlarm(), errorMsg, "custom", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -42,7 +42,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct ctx.GetAlarm(), "string param Multiline.Mode is not valid", "custom", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -56,7 +56,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -65,7 +65,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "string param Multiline.StartPattern is not a valid regex", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -80,7 +80,7 @@ bool MultilineOptions::Init(const Json::Value& 
config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -89,7 +89,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "string param Multiline.ContinuePattern is not a valid regex", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -104,7 +104,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -113,7 +113,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct PARAM_WARNING_IGNORE(ctx.GetLogger(), ctx.GetAlarm(), "string param Multiline.EndPattern is not a valid regex", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -127,11 +127,11 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct LOG_WARNING(ctx.GetLogger(), ("problem encountered in config parsing", "param Multiline.StartPattern and EndPattern are empty but ContinuePattern is not")( - "action", "ignore multiline config")("module", pluginName)("config", ctx.GetConfigName())); + "action", "ignore multiline config")("module", pluginType)("config", ctx.GetConfigName())); ctx.GetAlarm().SendAlarm(CATEGORY_CONFIG_ALARM, "param Multiline.StartPattern and EndPattern are empty but ContinuePattern is " "not: ignore multiline config, module: " - + pluginName + ", config: " + ctx.GetConfigName(), + + pluginType + ", config: " + ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), ctx.GetRegion()); @@ -141,13 +141,13 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct ctx.GetLogger(), ("problem encountered in 
config parsing", "none of param Multiline.StartPattern, Multiline.ContinuePattern and Multiline.EndPattern are empty")( - "action", "ignore param Multiline.ContinuePattern")("module", pluginName)("config", + "action", "ignore param Multiline.ContinuePattern")("module", pluginType)("config", ctx.GetConfigName())); ctx.GetAlarm().SendAlarm( CATEGORY_CONFIG_ALARM, "none of param Multiline.StartPattern, Multiline.ContinuePattern and Multiline.EndPattern are empty: " "ignore param Multiline.ContinuePattern, module: " - + pluginName + ", config: " + ctx.GetConfigName(), + + pluginType + ", config: " + ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), ctx.GetRegion()); @@ -164,7 +164,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct ctx.GetAlarm(), errorMsg, "single_line", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -176,7 +176,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct ctx.GetAlarm(), "string param Multiline.UnmatchedContentTreatment is not valid", "single_line", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -189,7 +189,7 @@ bool MultilineOptions::Init(const Json::Value& config, const PipelineContext& ct ctx.GetAlarm(), errorMsg, mIgnoringUnmatchWarning, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), diff --git a/core/file_server/MultilineOptions.h b/core/file_server/MultilineOptions.h index f9a49b4f1d..1c5a39cd4c 100644 --- a/core/file_server/MultilineOptions.h +++ b/core/file_server/MultilineOptions.h @@ -31,7 +31,7 @@ class MultilineOptions { enum class Mode { CUSTOM, JSON }; enum class UnmatchedContentTreatment { DISCARD, SINGLE_LINE }; - bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginName); + bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& 
pluginType); const std::shared_ptr& GetStartPatternReg() const { return mStartPatternRegPtr; } const std::shared_ptr& GetContinuePatternReg() const { return mContinuePatternRegPtr; } const std::shared_ptr& GetEndPatternReg() const { return mEndPatternRegPtr; } diff --git a/core/event/BlockEventManager.cpp b/core/file_server/event/BlockEventManager.cpp similarity index 97% rename from core/event/BlockEventManager.cpp rename to core/file_server/event/BlockEventManager.cpp index 9dd2393e5a..d6ac47998e 100644 --- a/core/event/BlockEventManager.cpp +++ b/core/file_server/event/BlockEventManager.cpp @@ -16,9 +16,8 @@ #include "common/HashUtil.h" #include "common/StringTools.h" -#include "polling/PollingEventQueue.h" -#include "processor/daemon/LogProcess.h" -#include "queue/ProcessQueueManager.h" +#include "file_server/polling/PollingEventQueue.h" +#include "pipeline/queue/ProcessQueueManager.h" #include "logger/Logger.h" DEFINE_FLAG_INT32(max_block_event_timeout, "max block event timeout, seconds", 3); diff --git a/core/event/BlockEventManager.h b/core/file_server/event/BlockEventManager.h similarity index 97% rename from core/event/BlockEventManager.h rename to core/file_server/event/BlockEventManager.h index d95758956f..113a78d2b4 100644 --- a/core/event/BlockEventManager.h +++ b/core/file_server/event/BlockEventManager.h @@ -18,11 +18,11 @@ #include #include -#include "Event.h" +#include "file_server/event/Event.h" #include "common/FeedbackInterface.h" #include "common/Flags.h" #include "common/Lock.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" DECLARE_FLAG_INT32(max_block_event_timeout); diff --git a/core/event/Event.h b/core/file_server/event/Event.h similarity index 100% rename from core/event/Event.h rename to core/file_server/event/Event.h diff --git a/core/event/EventQueue.cpp b/core/file_server/event/EventQueue.cpp similarity index 99% rename from core/event/EventQueue.cpp rename to core/file_server/event/EventQueue.cpp index 
ac11db93a9..4e46e7b3ef 100644 --- a/core/event/EventQueue.cpp +++ b/core/file_server/event/EventQueue.cpp @@ -13,7 +13,7 @@ // limitations under the License. #include "EventQueue.h" -#include "Event.h" +#include "file_server/event/Event.h" #include "common/StringTools.h" #include "common/Flags.h" #include "common/TimeUtil.h" diff --git a/core/event/EventQueue.h b/core/file_server/event/EventQueue.h similarity index 100% rename from core/event/EventQueue.h rename to core/file_server/event/EventQueue.h diff --git a/core/event_handler/EventHandler.cpp b/core/file_server/event_handler/EventHandler.cpp similarity index 99% rename from core/event_handler/EventHandler.cpp rename to core/file_server/event_handler/EventHandler.cpp index 9c3417a5aa..a41c3652d9 100644 --- a/core/event_handler/EventHandler.cpp +++ b/core/file_server/event_handler/EventHandler.cpp @@ -18,21 +18,21 @@ #include #include -#include "LogInput.h" +#include "file_server/event_handler/LogInput.h" #include "app_config/AppConfig.h" #include "common/FileSystemUtil.h" #include "common/RuntimeUtil.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" -#include "controller/EventDispatcher.h" -#include "event/BlockEventManager.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event/BlockEventManager.h" +#include "file_server/ConfigManager.h" #include "file_server/FileServer.h" #include "fuse/FuseFileBlacklist.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" -#include "processor/daemon/LogProcess.h" -#include "queue/ProcessQueueManager.h" +#include "runner/LogProcess.h" +#include "pipeline/queue/ProcessQueueManager.h" using namespace std; using namespace sls_logs; diff --git a/core/event_handler/EventHandler.h b/core/file_server/event_handler/EventHandler.h similarity index 99% rename from core/event_handler/EventHandler.h rename to core/file_server/event_handler/EventHandler.h index 9556571e5b..6cf3655dec 100644 --- 
a/core/event_handler/EventHandler.h +++ b/core/file_server/event_handler/EventHandler.h @@ -21,7 +21,7 @@ #include #include -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" namespace logtail { diff --git a/core/event_handler/HistoryFileImporter.cpp b/core/file_server/event_handler/HistoryFileImporter.cpp similarity index 95% rename from core/event_handler/HistoryFileImporter.cpp rename to core/file_server/event_handler/HistoryFileImporter.cpp index 37d16c4d1b..95dbc89cd7 100644 --- a/core/event_handler/HistoryFileImporter.cpp +++ b/core/file_server/event_handler/HistoryFileImporter.cpp @@ -19,12 +19,11 @@ #include "common/RuntimeUtil.h" #include "common/Thread.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "logger/Logger.h" -#include "processor/daemon/LogProcess.h" -#include "queue/ProcessQueueManager.h" -#include "reader/LogFileReader.h" -#include "app_config/AppConfig.h" +#include "runner/LogProcess.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "file_server/reader/LogFileReader.h" namespace logtail { @@ -114,8 +113,9 @@ void HistoryFileImporter::ProcessEvent(const HistoryFileEvent& event, const std: logBuffer->logFileReader = readerSharePtr; PipelineEventGroup group = LogFileReader::GenerateEventGroup(readerSharePtr, logBuffer.get()); - - // TODO: currently only 1 input is allowed, so we assume 0 here. It should be the actual input seq after refactorization. + + // TODO: currently only 1 input is allowed, so we assume 0 here. It should be the actual input seq after + // refactorization. 
logProcess->PushBuffer(readerSharePtr->GetQueueKey(), 0, std::move(group), 100000000); } else { // when ReadLog return false, retry once diff --git a/core/event_handler/HistoryFileImporter.h b/core/file_server/event_handler/HistoryFileImporter.h similarity index 98% rename from core/event_handler/HistoryFileImporter.h rename to core/file_server/event_handler/HistoryFileImporter.h index f10728a991..e74df48b6b 100644 --- a/core/event_handler/HistoryFileImporter.h +++ b/core/file_server/event_handler/HistoryFileImporter.h @@ -20,7 +20,7 @@ #include "common/StringTools.h" #include "common/CircularBuffer.h" #include "common/Thread.h" -#include "input/InputFile.h" +#include "plugin/input/InputFile.h" namespace logtail { diff --git a/core/event_handler/LogInput.cpp b/core/file_server/event_handler/LogInput.cpp similarity index 96% rename from core/event_handler/LogInput.cpp rename to core/file_server/event_handler/LogInput.cpp index bd2f7ab3a5..c48d77a3cf 100644 --- a/core/event_handler/LogInput.cpp +++ b/core/file_server/event_handler/LogInput.cpp @@ -16,8 +16,8 @@ #include -#include "EventHandler.h" -#include "HistoryFileImporter.h" +#include "file_server/event_handler/EventHandler.h" +#include "file_server/event_handler/HistoryFileImporter.h" #include "app_config/AppConfig.h" #include "application/Application.h" #include "checkpoint/CheckPointManager.h" @@ -27,19 +27,18 @@ #include "common/RuntimeUtil.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" -#include "controller/EventDispatcher.h" -#include "event/BlockEventManager.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event/BlockEventManager.h" +#include "file_server/ConfigManager.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" #include "monitor/Monitor.h" -#include "polling/PollingCache.h" -#include "polling/PollingDirFile.h" -#include "polling/PollingEventQueue.h" -#include "polling/PollingModify.h" -#include 
"processor/daemon/LogProcess.h" -#include "reader/GloablFileDescriptorManager.h" -#include "reader/LogFileReader.h" +#include "file_server/polling/PollingCache.h" +#include "file_server/polling/PollingDirFile.h" +#include "file_server/polling/PollingEventQueue.h" +#include "file_server/polling/PollingModify.h" +#include "file_server/reader/GloablFileDescriptorManager.h" +#include "file_server/reader/LogFileReader.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif @@ -90,7 +89,8 @@ void LogInput::Start() { mInteruptFlag = false; mGlobalOpenFdTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_OPEN_FD_TOTAL); - mGlobalRegisterHandlerTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_REGISTER_HANDLER_TOTAL); + mGlobalRegisterHandlerTotal + = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_REGISTER_HANDLER_TOTAL); new Thread([this]() { ProcessLoop(); }); } diff --git a/core/event_handler/LogInput.h b/core/file_server/event_handler/LogInput.h similarity index 100% rename from core/event_handler/LogInput.h rename to core/file_server/event_handler/LogInput.h diff --git a/core/event_listener/EventListener.h b/core/file_server/event_listener/EventListener.h similarity index 84% rename from core/event_listener/EventListener.h rename to core/file_server/event_listener/EventListener.h index f720ab5f12..0f9a3fa23a 100644 --- a/core/event_listener/EventListener.h +++ b/core/file_server/event_listener/EventListener.h @@ -17,7 +17,7 @@ #pragma once #if defined(__linux__) -#include "EventListener_Linux.h" +#include "file_server/event_listener/EventListener_Linux.h" #elif defined(_MSC_VER) -#include "EventListener_Windows.h" +#include "file_server/event_listener/EventListener_Windows.h" #endif diff --git a/core/event_listener/EventListener_Linux.cpp b/core/file_server/event_listener/EventListener_Linux.cpp similarity index 98% rename from core/event_listener/EventListener_Linux.cpp rename to 
core/file_server/event_listener/EventListener_Linux.cpp index 30606ecd98..849aa7616d 100644 --- a/core/event_listener/EventListener_Linux.cpp +++ b/core/file_server/event_listener/EventListener_Linux.cpp @@ -20,8 +20,8 @@ #include "monitor/LogtailAlarm.h" #include "common/ErrorUtil.h" #include "common/Flags.h" -#include "controller/EventDispatcher.h" -#include "event_handler/LogInput.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event_handler/LogInput.h" DEFINE_FLAG_BOOL(fs_events_inotify_enable, "", true); diff --git a/core/event_listener/EventListener_Linux.h b/core/file_server/event_listener/EventListener_Linux.h similarity index 97% rename from core/event_listener/EventListener_Linux.h rename to core/file_server/event_listener/EventListener_Linux.h index 5ecaaad4c7..c659b9bfb7 100644 --- a/core/event_listener/EventListener_Linux.h +++ b/core/file_server/event_listener/EventListener_Linux.h @@ -19,7 +19,7 @@ #include #include -#include "event/Event.h" +#include "file_server/event/Event.h" namespace logtail { diff --git a/core/event_listener/EventListener_Windows.cpp b/core/file_server/event_listener/EventListener_Windows.cpp similarity index 100% rename from core/event_listener/EventListener_Windows.cpp rename to core/file_server/event_listener/EventListener_Windows.cpp diff --git a/core/event_listener/EventListener_Windows.h b/core/file_server/event_listener/EventListener_Windows.h similarity index 100% rename from core/event_listener/EventListener_Windows.h rename to core/file_server/event_listener/EventListener_Windows.h diff --git a/core/polling/PollingCache.cpp b/core/file_server/polling/PollingCache.cpp similarity index 96% rename from core/polling/PollingCache.cpp rename to core/file_server/polling/PollingCache.cpp index 9bab54b7d3..e0f7c3806b 100644 --- a/core/polling/PollingCache.cpp +++ b/core/file_server/polling/PollingCache.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // 
limitations under the License. -#include "PollingCache.h" +#include "file_server/polling/PollingCache.h" #include "common/Flags.h" DEFINE_FLAG_INT32(max_file_not_exist_times, "treate as deleted when file stat failed XX times, default", 10); diff --git a/core/polling/PollingCache.h b/core/file_server/polling/PollingCache.h similarity index 100% rename from core/polling/PollingCache.h rename to core/file_server/polling/PollingCache.h diff --git a/core/polling/PollingDirFile.cpp b/core/file_server/polling/PollingDirFile.cpp similarity index 99% rename from core/polling/PollingDirFile.cpp rename to core/file_server/polling/PollingDirFile.cpp index 53e27ffc5a..4905bcd8d9 100644 --- a/core/polling/PollingDirFile.cpp +++ b/core/file_server/polling/PollingDirFile.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "PollingDirFile.h" +#include "file_server/polling/PollingDirFile.h" #if defined(__linux__) #include #include @@ -21,16 +21,16 @@ #endif #include -#include "PollingEventQueue.h" -#include "PollingModify.h" +#include "file_server/polling/PollingEventQueue.h" +#include "file_server/polling/PollingModify.h" #include "app_config/AppConfig.h" #include "common/ErrorUtil.h" #include "common/FileSystemUtil.h" #include "common/Flags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" -#include "event/Event.h" +#include "file_server/ConfigManager.h" +#include "file_server/event/Event.h" #include "file_server/FileServer.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" diff --git a/core/polling/PollingDirFile.h b/core/file_server/polling/PollingDirFile.h similarity index 99% rename from core/polling/PollingDirFile.h rename to core/file_server/polling/PollingDirFile.h index 4f83b5e0c4..b9af8c8988 100644 --- a/core/polling/PollingDirFile.h +++ b/core/file_server/polling/PollingDirFile.h @@ -17,7 +17,7 @@ #pragma once #include 
-#include "PollingCache.h" +#include "file_server/polling/PollingCache.h" #include "common/Lock.h" #include "common/LogRunnable.h" #include "common/Thread.h" diff --git a/core/polling/PollingEventQueue.cpp b/core/file_server/polling/PollingEventQueue.cpp similarity index 98% rename from core/polling/PollingEventQueue.cpp rename to core/file_server/polling/PollingEventQueue.cpp index f3413a3fc2..ebab605cf1 100644 --- a/core/polling/PollingEventQueue.cpp +++ b/core/file_server/polling/PollingEventQueue.cpp @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "PollingEventQueue.h" +#include "file_server/polling/PollingEventQueue.h" #include "common/StringTools.h" #include "common/Flags.h" #include "common/TimeUtil.h" -#include "event/Event.h" +#include "file_server/event/Event.h" #include "logger/Logger.h" DEFINE_FLAG_INT32(max_polling_event_queue_size, "max polling event queue size", 10000); diff --git a/core/polling/PollingEventQueue.h b/core/file_server/polling/PollingEventQueue.h similarity index 100% rename from core/polling/PollingEventQueue.h rename to core/file_server/polling/PollingEventQueue.h diff --git a/core/polling/PollingModify.cpp b/core/file_server/polling/PollingModify.cpp similarity index 98% rename from core/polling/PollingModify.cpp rename to core/file_server/polling/PollingModify.cpp index d035728a91..4b6e4c0995 100644 --- a/core/polling/PollingModify.cpp +++ b/core/file_server/polling/PollingModify.cpp @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "PollingModify.h" +#include "file_server/polling/PollingModify.h" -#include "PollingEventQueue.h" +#include "file_server/polling/PollingEventQueue.h" #if defined(__linux__) #include #endif @@ -24,7 +24,7 @@ #include "common/Flags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "event/Event.h" +#include "file_server/event/Event.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" diff --git a/core/polling/PollingModify.h b/core/file_server/polling/PollingModify.h similarity index 98% rename from core/polling/PollingModify.h rename to core/file_server/polling/PollingModify.h index 959c19515e..9be9827ffc 100644 --- a/core/polling/PollingModify.h +++ b/core/file_server/polling/PollingModify.h @@ -19,7 +19,7 @@ #include #include -#include "PollingCache.h" +#include "file_server/polling/PollingCache.h" #include "common/Lock.h" #include "common/LogRunnable.h" #include "common/Thread.h" diff --git a/core/reader/FileReaderOptions.cpp b/core/file_server/reader/FileReaderOptions.cpp similarity index 92% rename from core/reader/FileReaderOptions.cpp rename to core/file_server/reader/FileReaderOptions.cpp index 9d0ffb9dc4..bb199a7daa 100644 --- a/core/reader/FileReaderOptions.cpp +++ b/core/file_server/reader/FileReaderOptions.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "reader/FileReaderOptions.h" +#include "file_server/reader/FileReaderOptions.h" #include "common/FileSystemUtil.h" #include "common/Flags.h" @@ -30,7 +30,7 @@ DEFINE_FLAG_INT32(default_tail_limit_kb, "when first open file, if offset little than this value, move offset to beginning, KB", 1024 * 50); #endif -DEFINE_FLAG_INT32(default_reader_flush_timeout, "", 5); +DEFINE_FLAG_INT32(default_reader_flush_timeout, "", 60); DEFINE_FLAG_INT32(delay_bytes_upperlimit, "if (total_file_size - current_readed_size) exceed uppperlimit, send READ_LOG_DELAY_ALARM, bytes", 200 * 1024 * 1024); @@ -47,7 +47,7 @@ FileReaderOptions::FileReaderOptions() mRotatorQueueSize(static_cast(INT32_FLAG(logreader_max_rotate_queue_size))) { } -bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginName) { +bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginType) { string errorMsg; // FileEncoding @@ -56,7 +56,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c PARAM_ERROR_RETURN(ctx.GetLogger(), ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -71,7 +71,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c PARAM_ERROR_RETURN(ctx.GetLogger(), ctx.GetAlarm(), "string param FileEncoding is not valid", - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -84,7 +84,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mTailingAllMatchedFiles, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -98,7 +98,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mTailSizeKB, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), 
ctx.GetLogstoreName(), @@ -108,7 +108,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), "uint param TailSizeKB is larger than 104857600", mTailSizeKB, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -123,7 +123,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mFlushTimeoutSecs, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -136,7 +136,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mReadDelaySkipThresholdBytes, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -149,7 +149,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mReadDelayAlertThresholdBytes, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -162,7 +162,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mCloseUnusedReaderIntervalSec, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -175,7 +175,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mRotatorQueueSize, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -188,7 +188,7 @@ bool FileReaderOptions::Init(const Json::Value& config, const PipelineContext& c ctx.GetAlarm(), errorMsg, mAppendingLogPositionMeta, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), diff --git a/core/reader/FileReaderOptions.h b/core/file_server/reader/FileReaderOptions.h similarity index 97% rename from core/reader/FileReaderOptions.h rename to core/file_server/reader/FileReaderOptions.h index 
7e98d04483..fa5f5dcd23 100644 --- a/core/reader/FileReaderOptions.h +++ b/core/file_server/reader/FileReaderOptions.h @@ -45,7 +45,7 @@ struct FileReaderOptions { FileReaderOptions(); - bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginName); + bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginType); }; using FileReaderConfig = std::pair; diff --git a/core/reader/GloablFileDescriptorManager.h b/core/file_server/reader/GloablFileDescriptorManager.h similarity index 100% rename from core/reader/GloablFileDescriptorManager.h rename to core/file_server/reader/GloablFileDescriptorManager.h diff --git a/core/reader/JsonLogFileReader.cpp b/core/file_server/reader/JsonLogFileReader.cpp similarity index 98% rename from core/reader/JsonLogFileReader.cpp rename to core/file_server/reader/JsonLogFileReader.cpp index 0796b1abca..17f9b201d7 100644 --- a/core/reader/JsonLogFileReader.cpp +++ b/core/file_server/reader/JsonLogFileReader.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "reader/JsonLogFileReader.h" +#include "file_server/reader/JsonLogFileReader.h" #include "logger/Logger.h" diff --git a/core/reader/JsonLogFileReader.h b/core/file_server/reader/JsonLogFileReader.h similarity index 97% rename from core/reader/JsonLogFileReader.h rename to core/file_server/reader/JsonLogFileReader.h index c8bbc8a3e9..8d958c907c 100644 --- a/core/reader/JsonLogFileReader.h +++ b/core/file_server/reader/JsonLogFileReader.h @@ -16,7 +16,7 @@ #pragma once -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" namespace logtail { diff --git a/core/reader/LogFileReader.cpp b/core/file_server/reader/LogFileReader.cpp similarity index 99% rename from core/reader/LogFileReader.cpp rename to core/file_server/reader/LogFileReader.cpp index 1ccce1a6b5..098eb176de 100644 --- a/core/reader/LogFileReader.cpp +++ b/core/file_server/reader/LogFileReader.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "LogFileReader.h" +#include "file_server/reader/LogFileReader.h" #if defined(_MSC_VER) #include @@ -28,7 +28,7 @@ #include #include -#include "GloablFileDescriptorManager.h" +#include "file_server/reader/GloablFileDescriptorManager.h" #include "app_config/AppConfig.h" #include "checkpoint/CheckPointManager.h" #include "checkpoint/CheckpointManagerV2.h" @@ -40,21 +40,21 @@ #include "common/RandomUtil.h" #include "common/TimeUtil.h" #include "common/UUIDUtil.h" -#include "config_manager/ConfigManager.h" -#include "event/BlockEventManager.h" -#include "event_handler/LogInput.h" +#include "file_server/ConfigManager.h" +#include "file_server/event/BlockEventManager.h" +#include "file_server/event_handler/LogInput.h" #include "file_server/FileServer.h" #include "fuse/UlogfsHandler.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "monitor/MetricConstants.h" -#include "processor/inner/ProcessorParseContainerLogNative.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" #include "rapidjson/document.h" -#include "reader/JsonLogFileReader.h" +#include "file_server/reader/JsonLogFileReader.h" #include "sdk/Common.h" using namespace sls_logs; diff --git a/core/reader/LogFileReader.h b/core/file_server/reader/LogFileReader.h similarity index 99% rename from core/reader/LogFileReader.h rename to core/file_server/reader/LogFileReader.h index 97770706fc..cbca3ef1ab 100644 --- a/core/reader/LogFileReader.h +++ b/core/file_server/reader/LogFileReader.h @@ -31,16 +31,16 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/memory/SourceBuffer.h" -#include "event/Event.h" +#include 
"file_server/event/Event.h" #include "file_server/FileDiscoveryOptions.h" #include "file_server/FileServer.h" #include "file_server/MultilineOptions.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "logger/Logger.h" #include "models/StringView.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" #include "rapidjson/allocators.h" -#include "reader/FileReaderOptions.h" +#include "file_server/reader/FileReaderOptions.h" namespace logtail { diff --git a/core/go_pipeline/LogtailPlugin.cpp b/core/go_pipeline/LogtailPlugin.cpp index 7bd953804e..ca2d640b03 100644 --- a/core/go_pipeline/LogtailPlugin.cpp +++ b/core/go_pipeline/LogtailPlugin.cpp @@ -22,14 +22,15 @@ #include "common/JsonUtil.h" #include "common/LogtailCommonFlags.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" +#include "pipeline/compression/CompressorFactory.h" #include "container_manager/ConfigContainerInfoUpdateCmd.h" +#include "file_server/ConfigManager.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "pipeline/PipelineManager.h" #include "profile_sender/ProfileSender.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/queue/SenderQueueManager.h" DEFINE_FLAG_BOOL(enable_sls_metrics_format, "if enable format metrics in SLS metricstore log pattern", false); DEFINE_FLAG_BOOL(enable_containerd_upper_dir_detect, @@ -53,10 +54,16 @@ LogtailPlugin::LogtailPlugin() { mPluginValid = false; mPluginAlarmConfig.mLogstore = "logtail_alarm"; mPluginAlarmConfig.mAliuid = STRING_FLAG(logtail_profile_aliuid); + mPluginAlarmConfig.mCompressor + = CompressorFactory::GetInstance()->Create(Json::Value(), PipelineContext(), "flusher_sls", CompressType::ZSTD); mPluginProfileConfig.mLogstore = "shennong_log_profile"; mPluginProfileConfig.mAliuid = STRING_FLAG(logtail_profile_aliuid); + mPluginProfileConfig.mCompressor + = CompressorFactory::GetInstance()->Create(Json::Value(), 
PipelineContext(), "flusher_sls", CompressType::ZSTD); mPluginContainerConfig.mLogstore = "logtail_containers"; mPluginContainerConfig.mAliuid = STRING_FLAG(logtail_profile_aliuid); + mPluginContainerConfig.mCompressor + = CompressorFactory::GetInstance()->Create(Json::Value(), PipelineContext(), "flusher_sls", CompressType::ZSTD); mPluginCfg["LogtailSysConfDir"] = AppConfig::GetInstance()->GetLogtailSysConfDir(); mPluginCfg["HostIP"] = LogFileProfiler::mIpAddr; @@ -145,6 +152,12 @@ void LogtailPlugin::Start(const std::string& configName) { } int LogtailPlugin::IsValidToSend(long long logstoreKey) { + // TODO: because go profile pipeline is not controlled by C++, we cannot know queue key in advance + // therefore, we assume true here. This could be a potential problem if network is not available for profile info. + // However, since go profile pipeline will be stopped only during process exit, it should be fine. + if (logstoreKey == -1) { + return true; + } return SenderQueueManager::GetInstance()->IsValidToPush(logstoreKey) ? 
0 : -1; } diff --git a/core/go_pipeline/LogtailPlugin.h b/core/go_pipeline/LogtailPlugin.h index 94e7a78c4a..990b35cf8f 100644 --- a/core/go_pipeline/LogtailPlugin.h +++ b/core/go_pipeline/LogtailPlugin.h @@ -28,8 +28,8 @@ #include #include -#include "flusher/sls/FlusherSLS.h" -#include "log_pb/sls_logs.pb.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "protobuf/sls/sls_logs.pb.h" extern "C" { // The definition of Golang type is copied from PluginAdaptor.h that diff --git a/core/helper/LogtailInsight.cpp b/core/helper/LogtailInsight.cpp index 63a6b42d4a..8cdcfb71dd 100644 --- a/core/helper/LogtailInsight.cpp +++ b/core/helper/LogtailInsight.cpp @@ -22,7 +22,7 @@ #include #include #include -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "common/LogtailCommonFlags.h" #include "common/TimeUtil.h" diff --git a/core/models/MetricEvent.cpp b/core/models/MetricEvent.cpp index 4a162640d7..61cf34c05e 100644 --- a/core/models/MetricEvent.cpp +++ b/core/models/MetricEvent.cpp @@ -32,6 +32,10 @@ void MetricEvent::SetName(const string& name) { mName = StringView(b.data, b.size); } +void MetricEvent::SetNameNoCopy(StringView name) { + mName = name; +} + StringView MetricEvent::GetTag(StringView key) const { auto it = mTags.mInner.find(key); if (it != mTags.mInner.end()) { diff --git a/core/models/MetricEvent.h b/core/models/MetricEvent.h index 333b1eed89..2b976f351e 100644 --- a/core/models/MetricEvent.h +++ b/core/models/MetricEvent.h @@ -34,6 +34,7 @@ class MetricEvent : public PipelineEvent { StringView GetName() const { return mName; } void SetName(const std::string& name); + void SetNameNoCopy(StringView name); template bool Is() const { diff --git a/core/models/PipelineEventGroup.cpp b/core/models/PipelineEventGroup.cpp index e8457cbcd9..4989c7c229 100644 --- a/core/models/PipelineEventGroup.cpp +++ b/core/models/PipelineEventGroup.cpp @@ -20,7 +20,7 @@ #include "common/HashUtil.h" #include "logger/Logger.h" -#include 
"processor/inner/ProcessorParseContainerLogNative.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" using namespace std; diff --git a/core/models/PipelineEventGroup.h b/core/models/PipelineEventGroup.h index fa2461d274..a64325940e 100644 --- a/core/models/PipelineEventGroup.h +++ b/core/models/PipelineEventGroup.h @@ -51,6 +51,13 @@ enum class EventGroupMetaKey { CONTAINER_IMAGE_NAME, CONTAINER_IMAGE_ID, + PROMETHEUS_SCRAPE_DURATION, + PROMETHEUS_SCRAPE_RESPONSE_SIZE, + PROMETHEUS_SAMPLES_SCRAPED, + PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, + PROMETHEUS_INSTANCE, + PROMETHEUS_UP_STATE, + SOURCE_ID }; diff --git a/core/monitor/LogFileProfiler.cpp b/core/monitor/LogFileProfiler.cpp index e5580410da..701cd03a33 100644 --- a/core/monitor/LogFileProfiler.cpp +++ b/core/monitor/LogFileProfiler.cpp @@ -25,10 +25,10 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/version.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "logger/Logger.h" #include "profile_sender/ProfileSender.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/QueueKeyManager.h" DEFINE_FLAG_INT32(profile_data_send_interval, "interval of send LogFile/DomainSocket profile data, seconds", 600); DEFINE_FLAG_STRING(logtail_profile_snapshot, "reader profile on local disk", "logtail_profile_snapshot"); diff --git a/core/monitor/LogFileProfiler.h b/core/monitor/LogFileProfiler.h index 971956de20..fd29c2f185 100644 --- a/core/monitor/LogFileProfiler.h +++ b/core/monitor/LogFileProfiler.h @@ -21,7 +21,7 @@ #include #include #include -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace sls_logs { class LogGroup; diff --git a/core/monitor/LogtailAlarm.cpp b/core/monitor/LogtailAlarm.cpp index 99a99cbc82..cc601b7394 100644 --- a/core/monitor/LogtailAlarm.cpp +++ b/core/monitor/LogtailAlarm.cpp @@ -22,11 +22,10 @@ #include "common/Thread.h" #include "common/TimeUtil.h" #include 
"common/version.h" -#include "config_manager/ConfigManager.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "profile_sender/ProfileSender.h" -#include "queue/QueueKeyManager.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SenderQueueManager.h" DEFINE_FLAG_INT32(logtail_alarm_interval, "the interval of two same type alarm message", 30); DEFINE_FLAG_INT32(logtail_low_level_alarm_speed, "the speed(count/second) which logtail's low level alarm allow", 100); diff --git a/core/monitor/LogtailMetric.cpp b/core/monitor/LogtailMetric.cpp index c21ed9d6d0..b0211b32a6 100644 --- a/core/monitor/LogtailMetric.cpp +++ b/core/monitor/LogtailMetric.cpp @@ -194,7 +194,7 @@ void WriteMetrics::PreparePluginCommonLabels(const std::string& projectName, const std::string& logstoreName, const std::string& region, const std::string& configName, - const std::string& pluginName, + const std::string& pluginType, const std::string& pluginID, const std::string& nodeID, const std::string& childNodeID, @@ -203,7 +203,7 @@ void WriteMetrics::PreparePluginCommonLabels(const std::string& projectName, labels.emplace_back(std::make_pair(METRIC_LABEL_LOGSTORE, logstoreName)); labels.emplace_back(std::make_pair(METRIC_LABEL_REGION, region)); labels.emplace_back(std::make_pair(METRIC_LABEL_CONFIG_NAME, configName)); - labels.emplace_back(std::make_pair(METRIC_LABEL_PLUGIN_NAME, pluginName)); + labels.emplace_back(std::make_pair(METRIC_LABEL_PLUGIN_NAME, pluginType)); labels.emplace_back(std::make_pair(METRIC_LABEL_PLUGIN_ID, pluginID)); labels.emplace_back(std::make_pair(METRIC_LABEL_NODE_ID, nodeID)); labels.emplace_back(std::make_pair(METRIC_LABEL_CHILD_NODE_ID, childNodeID)); diff --git a/core/monitor/LogtailMetric.h b/core/monitor/LogtailMetric.h index 211ab91a05..7c3d6abe98 100644 --- a/core/monitor/LogtailMetric.h +++ b/core/monitor/LogtailMetric.h @@ -20,7 +20,7 @@ #include 
"LoongCollectorMetricTypes.h" #include "common/Lock.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { @@ -108,7 +108,7 @@ class WriteMetrics { const std::string& logstoreName, const std::string& region, const std::string& configName, - const std::string& pluginName, + const std::string& pluginType, const std::string& pluginID, const std::string& nodeID, const std::string& childNodeID, diff --git a/core/monitor/LoongCollectorMetricTypes.h b/core/monitor/LoongCollectorMetricTypes.h index 4d21106e4e..e815946ce8 100644 --- a/core/monitor/LoongCollectorMetricTypes.h +++ b/core/monitor/LoongCollectorMetricTypes.h @@ -19,7 +19,7 @@ #include #include "common/Lock.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { diff --git a/core/monitor/MetricExportor.cpp b/core/monitor/MetricExportor.cpp index 59a5d208b8..153fe621ca 100644 --- a/core/monitor/MetricExportor.cpp +++ b/core/monitor/MetricExportor.cpp @@ -23,9 +23,8 @@ #include "common/FileSystemUtil.h" #include "common/RuntimeUtil.h" #include "common/TimeUtil.h" -#include "config_manager/ConfigManager.h" #include "go_pipeline/LogtailPlugin.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "pipeline/PipelineManager.h" using namespace sls_logs; diff --git a/core/monitor/Monitor.cpp b/core/monitor/Monitor.cpp index 776318d786..2af97b0cf3 100644 --- a/core/monitor/Monitor.cpp +++ b/core/monitor/Monitor.cpp @@ -32,15 +32,14 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/version.h" -#include "config_manager/ConfigManager.h" -#include "event_handler/LogInput.h" -#include "flusher/sls/FlusherSLS.h" +#include "file_server/event_handler/LogInput.h" +#include "plugin/flusher/sls/FlusherSLS.h" #include "go_pipeline/LogtailPlugin.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include 
"monitor/LogtailAlarm.h" -#include "sender/FlusherRunner.h" +#include "runner/FlusherRunner.h" #if defined(__linux__) && !defined(__ANDROID__) #include "ObserverManager.h" #endif @@ -50,6 +49,7 @@ #include "config/provider/EnterpriseConfigProvider.h" #endif #include "pipeline/PipelineManager.h" +#include "profile_sender/ProfileSender.h" using namespace std; using namespace sls_logs; @@ -177,11 +177,10 @@ void LogtailMonitor::Monitor() { lastCheckHardLimitTime = monitorTime; GetMemStat(); - CalCpuStat(curCpuStat, mCpuStat); - if (CheckHardCpuLimit() || CheckHardMemLimit()) { + if (CheckHardMemLimit()) { LOG_ERROR(sLogger, ("Resource used by program exceeds hard limit", - "prepare restart Logtail")("cpu_usage", mCpuStat.mCpuUsage)("mem_rss", mMemStat.mRss)); + "prepare restart Logtail")("mem_rss", mMemStat.mRss)); Suicide(); } } @@ -470,15 +469,8 @@ bool LogtailMonitor::CheckSoftMemLimit() { return false; } -bool LogtailMonitor::CheckHardCpuLimit() { - float cpuUsageLimit = AppConfig::GetInstance()->IsResourceAutoScale() - ? AppConfig::GetInstance()->GetScaledCpuUsageUpLimit() - : AppConfig::GetInstance()->GetCpuUsageUpLimit(); - return mCpuStat.mCpuUsage > 10 * cpuUsageLimit; -} - bool LogtailMonitor::CheckHardMemLimit() { - return mMemStat.mRss > 10 * AppConfig::GetInstance()->GetMemUsageUpLimit(); + return mMemStat.mRss > 5 * AppConfig::GetInstance()->GetMemUsageUpLimit(); } void LogtailMonitor::DumpToLocal(const sls_logs::LogGroup& logGroup) { @@ -557,6 +549,13 @@ std::string LogtailMonitor::GetLoadAvg() { return loadStr; } +uint32_t LogtailMonitor::GetCpuCores() { + if (!CalCpuCores()) { + return 0; + } + return mCpuCores; +} + // Get the number of cores in CPU. 
bool LogtailMonitor::CalCpuCores() { ifstream fin; @@ -713,8 +712,7 @@ void LoongCollectorMonitor::Init() { labels.emplace_back(METRIC_LABEL_UUID, Application::GetInstance()->GetUUID()); labels.emplace_back(METRIC_LABEL_VERSION, ILOGTAIL_VERSION); DynamicMetricLabels dynamicLabels; - dynamicLabels.emplace_back(METRIC_LABEL_PROJECTS, - []() -> std::string { return FlusherSLS::GetAllProjects(); }); + dynamicLabels.emplace_back(METRIC_LABEL_PROJECTS, []() -> std::string { return FlusherSLS::GetAllProjects(); }); #ifdef __ENTERPRISE__ dynamicLabels.emplace_back(METRIC_LABEL_ALIUIDS, []() -> std::string { return EnterpriseConfigProvider::GetInstance()->GetAliuidSet(); }); @@ -738,10 +736,14 @@ void LoongCollectorMonitor::Init() { = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL); mIntGauges[METRIC_AGENT_REGISTER_HANDLER_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_REGISTER_HANDLER_TOTAL); - // mIntGauges[METRIC_AGENT_INSTANCE_CONFIG_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_INSTANCE_CONFIG_TOTAL); - mIntGauges[METRIC_AGENT_PIPELINE_CONFIG_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PIPELINE_CONFIG_TOTAL); - // mIntGauges[METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL); - // mIntGauges[METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL); + // mIntGauges[METRIC_AGENT_INSTANCE_CONFIG_TOTAL] = + // mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_INSTANCE_CONFIG_TOTAL); + mIntGauges[METRIC_AGENT_PIPELINE_CONFIG_TOTAL] + = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PIPELINE_CONFIG_TOTAL); + // mIntGauges[METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL] = + // mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL); + // mIntGauges[METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL] = + // mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL); // 
mIntGauges[METRIC_AGENT_CONSOLE_PIPELINE_CONFIG_TOTAL] // = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_CONSOLE_PIPELINE_CONFIG_TOTAL); // mIntGauges[METRIC_AGENT_PLUGIN_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PLUGIN_TOTAL); diff --git a/core/monitor/Monitor.h b/core/monitor/Monitor.h index 19a9d83716..cb8744442c 100644 --- a/core/monitor/Monitor.h +++ b/core/monitor/Monitor.h @@ -24,7 +24,6 @@ #include "LogtailMetric.h" #include "MetricConstants.h" #include "MetricStore.h" -#include "profile_sender/ProfileSender.h" #if defined(_MSC_VER) #include #endif @@ -88,6 +87,8 @@ class LogtailMonitor : public MetricStore { bool Init(); void Stop(); + uint32_t GetCpuCores(); + // GetRealtimeCpuLevel return a value to indicates current CPU usage level. // LogInput use it to do flow control. float GetRealtimeCpuLevel() { return mRealtimeCpuStat.mCpuUsage / mScaledCpuUsageUpLimit; } @@ -114,7 +115,6 @@ class LogtailMonitor : public MetricStore { // @return true if the memory usage exceeds limit continuously. bool CheckSoftMemLimit(); - bool CheckHardCpuLimit(); bool CheckHardMemLimit(); // SendStatusProfile collects status profile and send them to server. 
@@ -172,7 +172,7 @@ class LogtailMonitor : public MetricStore { float mScaledCpuUsageUpLimit; #if defined(__linux__) const static int32_t CPU_STAT_FOR_SCALE_ARRAY_SIZE = 2; - int32_t mCpuCores; + int32_t mCpuCores = 0; CpuStat mCpuStatForScale; OsCpuStat mOsCpuStatForScale; // mCpuArrayForScale and mOsCpuArrayForScale store lastest two CPU usage of diff --git a/core/observer/interface/helper.h b/core/observer/interface/helper.h index f6b18f596d..a230f88994 100644 --- a/core/observer/interface/helper.h +++ b/core/observer/interface/helper.h @@ -19,7 +19,7 @@ #include "network.h" #include #include -#include +#include #include #include #include diff --git a/core/observer/network/NetworkConfig.cpp b/core/observer/network/NetworkConfig.cpp index 3ea711b32f..6b454ed118 100644 --- a/core/observer/network/NetworkConfig.cpp +++ b/core/observer/network/NetworkConfig.cpp @@ -19,7 +19,7 @@ #include #include "common/JsonUtil.h" #include "ExceptionBase.h" -#include "input/InputObserverNetwork.h" +#include "plugin/input/InputObserverNetwork.h" DEFINE_FLAG_INT64(sls_observer_network_gc_interval, "SLS Observer NetWork GC interval seconds", 30); DEFINE_FLAG_INT64(sls_observer_network_probe_disable_process_interval, diff --git a/core/observer/network/NetworkObserver.cpp b/core/observer/network/NetworkObserver.cpp index e765839820..003a4471c4 100644 --- a/core/observer/network/NetworkObserver.cpp +++ b/core/observer/network/NetworkObserver.cpp @@ -21,7 +21,6 @@ #include "Monitor.h" #include "ProcessObserver.h" #include "common/LogtailCommonFlags.h" -#include "config_manager/ConfigManager.h" #include "go_pipeline/LogtailPlugin.h" #include "logger/Logger.h" #include "metas/ConnectionMetaManager.h" @@ -33,7 +32,7 @@ #include "config/provider/EnterpriseConfigProvider.h" #endif #include "common/HashUtil.h" -#include "flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/FlusherSLS.h" DEFINE_FLAG_INT64(sls_observer_network_ebpf_connection_gc_interval, "SLS Observer NetWork connection gc 
interval seconds", diff --git a/core/observer/network/NetworkObserver.h b/core/observer/network/NetworkObserver.h index 4cbd2a7488..71dd25397d 100644 --- a/core/observer/network/NetworkObserver.h +++ b/core/observer/network/NetworkObserver.h @@ -21,7 +21,7 @@ #include "NetworkConfig.h" #include #include -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "common/Thread.h" #include "common/Lock.h" #include "common/TimeUtil.h" diff --git a/core/observer/network/protocols/ProtocolEventAggregators.h b/core/observer/network/protocols/ProtocolEventAggregators.h index 734ab4bbe5..d02e1b46a2 100644 --- a/core/observer/network/protocols/ProtocolEventAggregators.h +++ b/core/observer/network/protocols/ProtocolEventAggregators.h @@ -23,7 +23,7 @@ #include "network/protocols/pgsql/type.h" #include #include -#include +#include namespace logtail { diff --git a/core/observer/network/protocols/common.h b/core/observer/network/protocols/common.h index 734c11eee0..4cc68a6132 100644 --- a/core/observer/network/protocols/common.h +++ b/core/observer/network/protocols/common.h @@ -18,7 +18,7 @@ #include "interface/protocol.h" #include -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "interface/helper.h" #include "LogtailAlarm.h" #include "metas/ServiceMetaCache.h" diff --git a/core/pipeline/GlobalConfig.cpp b/core/pipeline/GlobalConfig.cpp index b5ce473e84..0b2a5879b3 100644 --- a/core/pipeline/GlobalConfig.cpp +++ b/core/pipeline/GlobalConfig.cpp @@ -18,7 +18,7 @@ #include "common/ParamExtractor.h" #include "pipeline/PipelineContext.h" -#include "queue/ProcessQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" using namespace std; diff --git a/core/pipeline/InstanceConfigManager.cpp b/core/pipeline/InstanceConfigManager.cpp new file mode 100644 index 0000000000..3c231fd1cd --- /dev/null +++ b/core/pipeline/InstanceConfigManager.cpp @@ -0,0 +1,61 @@ +/* + * Copyright 2023 iLogtail Authors + * + * Licensed under 
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "pipeline/InstanceConfigManager.h" + +#include "config/feedbacker/ConfigFeedbackReceiver.h" + +using namespace std; + +namespace logtail { + +InstanceConfigManager::InstanceConfigManager() { +} + +void InstanceConfigManager::UpdateInstanceConfigs(InstanceConfigDiff& diff) { + for (auto& config : diff.mAdded) { + std::shared_ptr configTmp(new InstanceConfig(config.mName, std::move(config.mDetail))); + mInstanceConfigMap[config.mName] = configTmp; + ConfigFeedbackReceiver::GetInstance().FeedbackInstanceConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); + } + for (auto& config : diff.mModified) { + std::shared_ptr configTmp(new InstanceConfig(config.mName, std::move(config.mDetail))); + mInstanceConfigMap[config.mName] = configTmp; + ConfigFeedbackReceiver::GetInstance().FeedbackInstanceConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); + } + for (auto& configName : diff.mRemoved) { + mInstanceConfigMap.erase(configName); + ConfigFeedbackReceiver::GetInstance().FeedbackInstanceConfigStatus(configName, ConfigFeedbackStatus::DELETED); + } +} + +std::shared_ptr InstanceConfigManager::FindConfigByName(const string& configName) const { + auto it = mInstanceConfigMap.find(configName); + if (it != mInstanceConfigMap.end()) { + return it->second; + } + return nullptr; +} + +vector InstanceConfigManager::GetAllConfigNames() const { + vector res; + for (const auto& item : mInstanceConfigMap) { + 
res.push_back(item.first); + } + return res; +} + +} // namespace logtail diff --git a/core/pipeline/ProcessConfigManager.h b/core/pipeline/InstanceConfigManager.h similarity index 60% rename from core/pipeline/ProcessConfigManager.h rename to core/pipeline/InstanceConfigManager.h index eb5140d9f2..1727ea2c5c 100644 --- a/core/pipeline/ProcessConfigManager.h +++ b/core/pipeline/InstanceConfigManager.h @@ -22,31 +22,31 @@ #include "common/Lock.h" #include "config/ConfigDiff.h" -#include "config/ProcessConfig.h" +#include "config/InstanceConfig.h" namespace logtail { -class ProcessConfigManager { +class InstanceConfigManager { public: - ProcessConfigManager(const ProcessConfigManager&) = delete; - ProcessConfigManager& operator=(const ProcessConfigManager&) = delete; + InstanceConfigManager(const InstanceConfigManager&) = delete; + InstanceConfigManager& operator=(const InstanceConfigManager&) = delete; - static ProcessConfigManager* GetInstance() { - static ProcessConfigManager instance; + static InstanceConfigManager* GetInstance() { + static InstanceConfigManager instance; return &instance; } - void UpdateProcessConfigs(ProcessConfigDiff& diff); - std::shared_ptr FindConfigByName(const std::string& configName) const; + void UpdateInstanceConfigs(InstanceConfigDiff& diff); + std::shared_ptr FindConfigByName(const std::string& configName) const; std::vector GetAllConfigNames() const; private: - ProcessConfigManager(); - ~ProcessConfigManager() = default; - std::map> mProcessConfigMap; + InstanceConfigManager(); + ~InstanceConfigManager() = default; + std::map> mInstanceConfigMap; #ifdef APSARA_UNIT_TEST_MAIN - friend class ProcessConfigManagerUnittest; + friend class InstanceConfigManagerUnittest; friend class CommonConfigProviderUnittest; #endif }; diff --git a/core/pipeline/Pipeline.cpp b/core/pipeline/Pipeline.cpp index e25474aef0..0610563906 100644 --- a/core/pipeline/Pipeline.cpp +++ b/core/pipeline/Pipeline.cpp @@ -19,22 +19,41 @@ #include #include -#include 
"batch/TimeoutFlushManager.h" +#include "pipeline/batch/TimeoutFlushManager.h" #include "common/Flags.h" #include "common/ParamExtractor.h" -#include "flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/FlusherSLS.h" #include "go_pipeline/LogtailPlugin.h" -#include "input/InputFeedbackInterfaceRegistry.h" -#include "plugin/PluginRegistry.h" -#include "processor/ProcessorParseApsaraNative.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" -#include "queue/SenderQueueManager.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "plugin/processor/ProcessorParseApsaraNative.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SenderQueueManager.h" DECLARE_FLAG_INT32(default_plugin_log_queue_size); using namespace std; +namespace { +class AggregatorDefaultConfig { +public: + static AggregatorDefaultConfig& Instance() { + static AggregatorDefaultConfig instance; + return instance; + } + + Json::Value* GetJsonConfig() { return &aggregatorDefault; } + +private: + Json::Value aggregatorDefault; + AggregatorDefaultConfig() { aggregatorDefault["Type"] = "aggregator_default"; } + + AggregatorDefaultConfig(AggregatorDefaultConfig const&) = delete; + void operator=(AggregatorDefaultConfig const&) = delete; +}; +} // namespace + namespace logtail { void AddExtendedGlobalParamToGoPipeline(const Json::Value& extendedParams, Json::Value& pipeline) { @@ -59,20 +78,21 @@ bool Pipeline::Init(PipelineConfig&& config) { const InputContainerStdio* inputContainerStdio = nullptr; bool hasFlusherSLS = false; -#ifdef __ENTERPRISE__ - // to send alarm before flusherSLS is built, a temporary object is made, which will be overriden shortly after. 
+ // to send alarm and init MetricsRecord before flusherSLS is built, a temporary object is made, which will be unique_ptr SLSTmp = make_unique(); - SLSTmp->mProject = config.mProject; - SLSTmp->mLogstore = config.mLogstore; - SLSTmp->mRegion = config.mRegion; - mContext.SetSLSInfo(SLSTmp.get()); -#endif + if (!config.mProject.empty()) { + SLSTmp->mProject = config.mProject; + SLSTmp->mLogstore = config.mLogstore; + SLSTmp->mRegion = config.mRegion; + mContext.SetSLSInfo(SLSTmp.get()); + } mPluginID.store(0); for (size_t i = 0; i < config.mInputs.size(); ++i) { const Json::Value& detail = *config.mInputs[i]; - string name = detail["Type"].asString(); - unique_ptr input = PluginRegistry::GetInstance()->CreateInput(name, GenNextPluginMeta(false)); + string pluginType = detail["Type"].asString(); + unique_ptr input + = PluginRegistry::GetInstance()->CreateInput(pluginType, GenNextPluginMeta(false)); if (input) { Json::Value optionalGoPipeline; if (!input->Init(detail, mContext, i, optionalGoPipeline)) { @@ -83,19 +103,20 @@ bool Pipeline::Init(PipelineConfig&& config) { MergeGoPipeline(optionalGoPipeline, mGoPipelineWithInput); } // for special treatment below - if (name == InputFile::sName) { + if (pluginType == InputFile::sName) { inputFile = static_cast(mInputs[0]->GetPlugin()); - } else if (name == InputContainerStdio::sName) { + } else if (pluginType == InputContainerStdio::sName) { inputContainerStdio = static_cast(mInputs[0]->GetPlugin()); } } else { - AddPluginToGoPipeline(detail, "inputs", mGoPipelineWithInput); + AddPluginToGoPipeline(pluginType, detail, "inputs", mGoPipelineWithInput); } - ++mPluginCntMap["inputs"][name]; + ++mPluginCntMap["inputs"][pluginType]; } for (size_t i = 0; i < config.mProcessors.size(); ++i) { - string name = (*config.mProcessors[i])["Type"].asString(); + const Json::Value& detail = *config.mProcessors[i]; + string pluginType = detail["Type"].asString(); unique_ptr processor = PluginRegistry::GetInstance()->CreateProcessor(name, 
GenNextPluginMeta(false)); if (processor) { @@ -104,35 +125,44 @@ bool Pipeline::Init(PipelineConfig&& config) { } mProcessorLine.emplace_back(std::move(processor)); // for special treatment of topicformat in apsara mode - if (i == 0 && name == ProcessorParseApsaraNative::sName) { + if (i == 0 && pluginType == ProcessorParseApsaraNative::sName) { mContext.SetIsFirstProcessorApsaraFlag(true); } } else { if (ShouldAddPluginToGoPipelineWithInput()) { AddPluginToGoPipeline(*config.mProcessors[i], "processors", mGoPipelineWithInput); } else { - AddPluginToGoPipeline(*config.mProcessors[i], "processors", mGoPipelineWithoutInput); + AddPluginToGoPipeline(pluginType, detail, "processors", mGoPipelineWithoutInput); } } - ++mPluginCntMap["processors"][name]; + ++mPluginCntMap["processors"][pluginType]; } - for (auto detail : config.mAggregators) { + if (config.mAggregators.empty() && config.IsFlushingThroughGoPipelineExisted()) { + // an aggregator_default plugin will be add to go pipeline when mAggregators is empty and need to send go data + // to cpp flusher. 
+ config.mAggregators.push_back(AggregatorDefaultConfig::Instance().GetJsonConfig()); + } + for (size_t i = 0; i < config.mAggregators.size(); ++i) { + const Json::Value& detail = *config.mAggregators[i]; + string pluginType = detail["Type"].asString(); + GenNextPluginMeta(false); if (ShouldAddPluginToGoPipelineWithInput()) { - AddPluginToGoPipeline(*detail, "aggregators", mGoPipelineWithInput); + AddPluginToGoPipeline(pluginType, detail, "aggregators", mGoPipelineWithInput); } else { - AddPluginToGoPipeline(*detail, "aggregators", mGoPipelineWithoutInput); + AddPluginToGoPipeline(pluginType, detail, "aggregators", mGoPipelineWithoutInput); } - ++mPluginCntMap["aggregators"][(*detail)["Type"].asString()]; + ++mPluginCntMap["aggregators"][pluginType]; } - for (auto detail : config.mFlushers) { - string name = (*detail)["Type"].asString(); + for (size_t i = 0; i < config.mFlushers.size(); ++i) { + const Json::Value& detail = *config.mFlushers[i]; + string pluginType = detail["Type"].asString(); unique_ptr flusher - = PluginRegistry::GetInstance()->CreateFlusher(name, GenNextPluginMeta(false)); + = PluginRegistry::GetInstance()->CreateFlusher(pluginType, GenNextPluginMeta(false)); if (flusher) { Json::Value optionalGoPipeline; - if (!flusher->Init(*detail, mContext, optionalGoPipeline)) { + if (!flusher->Init(detail, mContext, optionalGoPipeline)) { return false; } mFlushers.emplace_back(std::move(flusher)); @@ -143,18 +173,18 @@ bool Pipeline::Init(PipelineConfig&& config) { MergeGoPipeline(optionalGoPipeline, mGoPipelineWithoutInput); } } - if (name == FlusherSLS::sName) { + if (pluginType == FlusherSLS::sName) { hasFlusherSLS = true; mContext.SetSLSInfo(static_cast(mFlushers.back()->GetPlugin())); } } else { if (ShouldAddPluginToGoPipelineWithInput()) { - AddPluginToGoPipeline(*detail, "flushers", mGoPipelineWithInput); + AddPluginToGoPipeline(pluginType, detail, "flushers", mGoPipelineWithInput); } else { - AddPluginToGoPipeline(*detail, "flushers", 
mGoPipelineWithoutInput); + AddPluginToGoPipeline(pluginType, detail, "flushers", mGoPipelineWithoutInput); } } - ++mPluginCntMap["flushers"][name]; + ++mPluginCntMap["flushers"][pluginType]; } // route is only enabled in native flushing mode, thus the index in config is the same as that in mFlushers @@ -162,14 +192,17 @@ bool Pipeline::Init(PipelineConfig&& config) { return false; } - for (auto detail : config.mExtensions) { + for (size_t i = 0; i < config.mExtensions.size(); ++i) { + const Json::Value& detail = *config.mExtensions[i]; + string pluginType = detail["Type"].asString(); + GenNextPluginMeta(false); if (!mGoPipelineWithInput.isNull()) { - AddPluginToGoPipeline(*detail, "extensions", mGoPipelineWithInput); + AddPluginToGoPipeline(pluginType, detail, "extensions", mGoPipelineWithInput); } if (!mGoPipelineWithoutInput.isNull()) { - AddPluginToGoPipeline(*detail, "extensions", mGoPipelineWithoutInput); + AddPluginToGoPipeline(pluginType, detail, "extensions", mGoPipelineWithoutInput); } - ++mPluginCntMap["extensions"][(*detail)["Type"].asString()]; + ++mPluginCntMap["extensions"][pluginType]; } // global module must be initialized at last, since native input or flusher plugin may generate global param in Go @@ -258,7 +291,7 @@ bool Pipeline::Init(PipelineConfig&& config) { ProcessQueueManager::GetInstance()->CreateOrUpdateBoundedQueue(mContext.GetProcessQueueKey(), priority); } else { ProcessQueueManager::GetInstance()->CreateOrUpdateCircularQueue( - mContext.GetProcessQueueKey(), priority, 100); + mContext.GetProcessQueueKey(), priority, 1024); } @@ -393,10 +426,18 @@ void Pipeline::MergeGoPipeline(const Json::Value& src, Json::Value& dst) { } } -void Pipeline::AddPluginToGoPipeline(const Json::Value& plugin, const string& module, Json::Value& dst) { +std::string Pipeline::GenPluginTypeWithID(std::string pluginType, std::string pluginID) { + return pluginType + "/" + pluginID; +} + +// Rule: pluginTypeWithID=pluginType/pluginID#pluginPriority. 
+void Pipeline::AddPluginToGoPipeline(const string& pluginType, + const Json::Value& plugin, + const string& module, + Json::Value& dst) { Json::Value res(Json::objectValue), detail = plugin; detail.removeMember("Type"); - res["type"] = plugin["Type"]; + res["type"] = GenPluginTypeWithID(pluginType, GetNowPluginID()); res["detail"] = detail; dst[module].append(res); } @@ -460,11 +501,6 @@ std::string Pipeline::GetNowPluginID() { return std::to_string(mPluginID.load()); } -std::string Pipeline::GenNextPluginID() { - mPluginID.fetch_add(1); - return std::to_string(mPluginID.load()); -} - PluginInstance::PluginMeta Pipeline::GenNextPluginMeta(bool lastOne) { mPluginID.fetch_add(1); int32_t childNodeID = mPluginID.load(); diff --git a/core/pipeline/Pipeline.h b/core/pipeline/Pipeline.h index ae7e7f95b6..23055a99cd 100644 --- a/core/pipeline/Pipeline.h +++ b/core/pipeline/Pipeline.h @@ -24,14 +24,14 @@ #include #include "config/PipelineConfig.h" -#include "input/InputContainerStdio.h" -#include "input/InputFile.h" +#include "plugin/input/InputContainerStdio.h" +#include "plugin/input/InputFile.h" #include "models/PipelineEventGroup.h" #include "pipeline/PipelineContext.h" -#include "plugin/instance/FlusherInstance.h" -#include "plugin/instance/InputInstance.h" -#include "plugin/instance/ProcessorInstance.h" -#include "route/Router.h" +#include "pipeline/plugin/instance/FlusherInstance.h" +#include "pipeline/plugin/instance/InputInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "pipeline/route/Router.h" namespace logtail { @@ -60,7 +60,7 @@ class Pipeline { const std::vector>& GetInputs() const { return mInputs; } std::string GetNowPluginID(); - std::string GenNextPluginID(); + static std::string GenPluginTypeWithID(std::string pluginType, std::string pluginID); PluginInstance::PluginMeta GenNextPluginMeta(bool lastOne); bool HasGoPipelineWithInput() const { return !mGoPipelineWithInput.isNull(); } @@ -70,7 +70,10 @@ class Pipeline { private: 
void MergeGoPipeline(const Json::Value& src, Json::Value& dst); - void AddPluginToGoPipeline(const Json::Value& plugin, const std::string& module, Json::Value& dst); + void AddPluginToGoPipeline(const std::string& type, + const Json::Value& plugin, + const std::string& module, + Json::Value& dst); void CopyNativeGlobalParamToGoPipeline(Json::Value& root); bool ShouldAddPluginToGoPipelineWithInput() const { return mInputs.empty() && mProcessorLine.empty(); } diff --git a/core/pipeline/PipelineContext.cpp b/core/pipeline/PipelineContext.cpp index ac253d5ef2..17d63a9efa 100644 --- a/core/pipeline/PipelineContext.cpp +++ b/core/pipeline/PipelineContext.cpp @@ -14,8 +14,8 @@ #include "pipeline/PipelineContext.h" -#include "flusher/sls/FlusherSLS.h" -#include "queue/QueueKeyManager.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "pipeline/queue/QueueKeyManager.h" using namespace std; diff --git a/core/pipeline/PipelineContext.h b/core/pipeline/PipelineContext.h index 227f0b0fd4..8eaa1ce487 100644 --- a/core/pipeline/PipelineContext.h +++ b/core/pipeline/PipelineContext.h @@ -26,7 +26,7 @@ #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "pipeline/GlobalConfig.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/pipeline/PipelineManager.cpp b/core/pipeline/PipelineManager.cpp index 03181cca48..3bbe4a57ff 100644 --- a/core/pipeline/PipelineManager.cpp +++ b/core/pipeline/PipelineManager.cpp @@ -16,7 +16,7 @@ #include "pipeline/PipelineManager.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "file_server/FileServer.h" #include "go_pipeline/LogtailPlugin.h" #include "prometheus/PrometheusInputRunner.h" @@ -24,15 +24,15 @@ #include "ebpf/eBPFServer.h" #include "observer/ObserverManager.h" #endif -#include "processor/daemon/LogProcess.h" +#include "runner/LogProcess.h" #if defined(__ENTERPRISE__) && defined(__linux__) && 
!defined(__ANDROID__) #include "app_config/AppConfig.h" #include "shennong/ShennongManager.h" #include "streamlog/StreamLogManager.h" #endif #include "config/feedbacker/ConfigFeedbackReceiver.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" using namespace std; @@ -46,33 +46,27 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { static bool isInputStreamStarted = false; #endif bool isInputObserverChanged = false, isInputFileChanged = false, isInputStreamChanged = false, - isInputContainerStdioChanged = false, isInputPrometheusChanged = false, inputEbpfChanged = false; + isInputContainerStdioChanged = false; for (const auto& name : diff.mRemoved) { CheckIfInputUpdated(mPipelineNameEntityMap[name]->GetConfig()["inputs"][0], isInputObserverChanged, isInputFileChanged, isInputStreamChanged, - isInputContainerStdioChanged, - isInputPrometheusChanged, - inputEbpfChanged); + isInputContainerStdioChanged); } for (const auto& config : diff.mModified) { CheckIfInputUpdated(*config.mInputs[0], isInputObserverChanged, isInputFileChanged, isInputStreamChanged, - isInputContainerStdioChanged, - isInputPrometheusChanged, - inputEbpfChanged); + isInputContainerStdioChanged); } for (const auto& config : diff.mAdded) { CheckIfInputUpdated(*config.mInputs[0], isInputObserverChanged, isInputFileChanged, isInputStreamChanged, - isInputContainerStdioChanged, - isInputPrometheusChanged, - inputEbpfChanged); + isInputContainerStdioChanged); } #if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) @@ -92,16 +86,7 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { FileServer::GetInstance()->Pause(); } LogProcess::GetInstance()->HoldOn(); - if (isInputPrometheusChanged) { - PrometheusInputRunner::GetInstance()->Start(); - } -#if defined(__linux__) && !defined(__ANDROID__) - // 
和其它插件不同,ebpf需要init之后才能配置加载,最终状态这个init函数是在插件自己的start函数里面,目前暂时在此过渡。 - if (inputEbpfChanged) { - logtail::ebpf::eBPFServer::GetInstance()->Init(); - } - -#endif + LogtailPlugin::GetInstance()->HoldOn(false); #endif for (const auto& name : diff.mRemoved) { @@ -331,9 +316,7 @@ void PipelineManager::CheckIfInputUpdated(const Json::Value& config, bool& isInputObserverChanged, bool& isInputFileChanged, bool& isInputStreamChanged, - bool& isInputContainerStdioChanged, - bool& isInputPrometheusChanged, - bool& isInputEbpfChanged) { + bool& isInputContainerStdioChanged) { string inputType = config["Type"].asString(); if (inputType == "input_observer_network") { isInputObserverChanged = true; @@ -343,13 +326,6 @@ void PipelineManager::CheckIfInputUpdated(const Json::Value& config, isInputStreamChanged = true; } else if (inputType == "input_container_stdio") { isInputContainerStdioChanged = true; - } else if (inputType == "input_prometheus") { - isInputPrometheusChanged = true; - } else if (inputType == "input_ebpf_processprobe_security" || inputType == "input_ebpf_processprobe_observer" - || inputType == "input_ebpf_sockettraceprobe_security" - || inputType == "input_ebpf_sockettraceprobe_observer" || inputType == "input_ebpf_fileprobe_security" - || inputType == "input_ebpf_profilingprobe_observer") { - isInputEbpfChanged = true; } } diff --git a/core/pipeline/PipelineManager.h b/core/pipeline/PipelineManager.h index 4f36de5e8b..5f63b0fd25 100644 --- a/core/pipeline/PipelineManager.h +++ b/core/pipeline/PipelineManager.h @@ -62,9 +62,7 @@ class PipelineManager { bool& isInputObserverChanged, bool& isInputFileChanged, bool& isInputStreamChanged, - bool& isInputContainerStdioChanged, - bool& isInputPrometheusChanged, - bool& isInputEbpfChanged); + bool& isInputContainerStdioChanged); std::unordered_map> mPipelineNameEntityMap; mutable SpinLock mPluginCntMapLock; diff --git a/core/pipeline/ProcessConfigManager.cpp b/core/pipeline/ProcessConfigManager.cpp deleted file mode 100644 
index e2bbe10451..0000000000 --- a/core/pipeline/ProcessConfigManager.cpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2023 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "pipeline/ProcessConfigManager.h" - -#include "config/feedbacker/ConfigFeedbackReceiver.h" - -using namespace std; - -namespace logtail { - -ProcessConfigManager::ProcessConfigManager() { -} - -void ProcessConfigManager::UpdateProcessConfigs(ProcessConfigDiff& diff) { - for (auto& config : diff.mAdded) { - std::shared_ptr configTmp(new ProcessConfig(config.mName, std::move(config.mDetail))); - mProcessConfigMap[config.mName] = configTmp; - ConfigFeedbackReceiver::GetInstance().FeedbackProcessConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); - } - for (auto& config : diff.mModified) { - std::shared_ptr configTmp(new ProcessConfig(config.mName, std::move(config.mDetail))); - mProcessConfigMap[config.mName] = configTmp; - ConfigFeedbackReceiver::GetInstance().FeedbackProcessConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); - } - for (auto& configName : diff.mRemoved) { - mProcessConfigMap.erase(configName); - ConfigFeedbackReceiver::GetInstance().FeedbackProcessConfigStatus(configName, ConfigFeedbackStatus::DELETED); - } -} - -std::shared_ptr ProcessConfigManager::FindConfigByName(const string& configName) const { - auto it = mProcessConfigMap.find(configName); - if (it != mProcessConfigMap.end()) { - return it->second; - } - return 
nullptr; -} - -vector ProcessConfigManager::GetAllConfigNames() const { - vector res; - for (const auto& item : mProcessConfigMap) { - res.push_back(item.first); - } - return res; -} - -} // namespace logtail diff --git a/core/batch/BatchItem.h b/core/pipeline/batch/BatchItem.h similarity index 97% rename from core/batch/BatchItem.h rename to core/pipeline/batch/BatchItem.h index 3d1d0ff211..b27fa0f5e8 100644 --- a/core/batch/BatchItem.h +++ b/core/pipeline/batch/BatchItem.h @@ -20,9 +20,9 @@ #include #include -#include "batch/BatchStatus.h" -#include "batch/BatchedEvents.h" -#include "batch/FlushStrategy.h" +#include "pipeline/batch/BatchStatus.h" +#include "pipeline/batch/BatchedEvents.h" +#include "pipeline/batch/FlushStrategy.h" #include "models/PipelineEventGroup.h" #include "models/StringView.h" diff --git a/core/batch/BatchStatus.h b/core/pipeline/batch/BatchStatus.h similarity index 98% rename from core/batch/BatchStatus.h rename to core/pipeline/batch/BatchStatus.h index 3aeae30f0d..18c2b7db6c 100644 --- a/core/batch/BatchStatus.h +++ b/core/pipeline/batch/BatchStatus.h @@ -19,7 +19,7 @@ #include #include -#include "batch/BatchedEvents.h" +#include "pipeline/batch/BatchedEvents.h" #include "models/PipelineEventPtr.h" namespace logtail { diff --git a/core/batch/BatchedEvents.h b/core/pipeline/batch/BatchedEvents.h similarity index 100% rename from core/batch/BatchedEvents.h rename to core/pipeline/batch/BatchedEvents.h diff --git a/core/batch/Batcher.h b/core/pipeline/batch/Batcher.h similarity index 98% rename from core/batch/Batcher.h rename to core/pipeline/batch/Batcher.h index 67ba0c190a..b165bab370 100644 --- a/core/batch/Batcher.h +++ b/core/pipeline/batch/Batcher.h @@ -24,10 +24,10 @@ #include #include -#include "batch/BatchItem.h" -#include "batch/BatchStatus.h" -#include "batch/FlushStrategy.h" -#include "batch/TimeoutFlushManager.h" +#include "pipeline/batch/BatchItem.h" +#include "pipeline/batch/BatchStatus.h" +#include 
"pipeline/batch/FlushStrategy.h" +#include "pipeline/batch/TimeoutFlushManager.h" #include "common/Flags.h" #include "common/ParamExtractor.h" #include "models/PipelineEventGroup.h" diff --git a/core/batch/FlushStrategy.cpp b/core/pipeline/batch/FlushStrategy.cpp similarity index 95% rename from core/batch/FlushStrategy.cpp rename to core/pipeline/batch/FlushStrategy.cpp index ae60a84fe7..58a3899e69 100644 --- a/core/batch/FlushStrategy.cpp +++ b/core/pipeline/batch/FlushStrategy.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "batch/FlushStrategy.h" +#include "pipeline/batch/FlushStrategy.h" using namespace std; diff --git a/core/batch/FlushStrategy.h b/core/pipeline/batch/FlushStrategy.h similarity index 98% rename from core/batch/FlushStrategy.h rename to core/pipeline/batch/FlushStrategy.h index 0609fe2e98..d248cc7f10 100644 --- a/core/batch/FlushStrategy.h +++ b/core/pipeline/batch/FlushStrategy.h @@ -21,7 +21,7 @@ #include #include -#include "batch/BatchStatus.h" +#include "pipeline/batch/BatchStatus.h" #include "models/PipelineEventPtr.h" namespace logtail { diff --git a/core/batch/TimeoutFlushManager.cpp b/core/pipeline/batch/TimeoutFlushManager.cpp similarity index 97% rename from core/batch/TimeoutFlushManager.cpp rename to core/pipeline/batch/TimeoutFlushManager.cpp index 471a34559d..3bf1bc7911 100644 --- a/core/batch/TimeoutFlushManager.cpp +++ b/core/pipeline/batch/TimeoutFlushManager.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "batch/TimeoutFlushManager.h" +#include "pipeline/batch/TimeoutFlushManager.h" using namespace std; diff --git a/core/batch/TimeoutFlushManager.h b/core/pipeline/batch/TimeoutFlushManager.h similarity index 97% rename from core/batch/TimeoutFlushManager.h rename to core/pipeline/batch/TimeoutFlushManager.h index 9aa2379c29..e72d90011e 100644 --- a/core/batch/TimeoutFlushManager.h +++ b/core/pipeline/batch/TimeoutFlushManager.h @@ -23,7 +23,7 @@ #include #include -#include "plugin/interface/Flusher.h" +#include "pipeline/plugin/interface/Flusher.h" namespace logtail { diff --git a/core/compression/CompressType.h b/core/pipeline/compression/CompressType.h similarity index 100% rename from core/compression/CompressType.h rename to core/pipeline/compression/CompressType.h diff --git a/core/compression/Compressor.h b/core/pipeline/compression/Compressor.h similarity index 96% rename from core/compression/Compressor.h rename to core/pipeline/compression/Compressor.h index 68fdc3ca11..1694f6e6ad 100644 --- a/core/compression/Compressor.h +++ b/core/pipeline/compression/Compressor.h @@ -18,7 +18,7 @@ #include -#include "compression/CompressType.h" +#include "pipeline/compression/CompressType.h" namespace logtail { diff --git a/core/compression/CompressorFactory.cpp b/core/pipeline/compression/CompressorFactory.cpp similarity index 92% rename from core/compression/CompressorFactory.cpp rename to core/pipeline/compression/CompressorFactory.cpp index fbee50949f..1c91f24035 100644 --- a/core/compression/CompressorFactory.cpp +++ b/core/pipeline/compression/CompressorFactory.cpp @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "compression/CompressorFactory.h" +#include "pipeline/compression/CompressorFactory.h" #include "common/ParamExtractor.h" -#include "compression/LZ4Compressor.h" -#include "compression/ZstdCompressor.h" +#include "pipeline/compression/LZ4Compressor.h" +#include "pipeline/compression/ZstdCompressor.h" using namespace std; @@ -24,7 +24,7 @@ namespace logtail { unique_ptr CompressorFactory::Create(const Json::Value& config, const PipelineContext& ctx, - const string& pluginName, + const string& pluginType, CompressType defaultType) { string compressType, errorMsg; if (!GetOptionalStringParam(config, "CompressType", compressType, errorMsg)) { @@ -32,7 +32,7 @@ unique_ptr CompressorFactory::Create(const Json::Value& config, ctx.GetAlarm(), errorMsg, CompressTypeToString(defaultType), - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -49,7 +49,7 @@ unique_ptr CompressorFactory::Create(const Json::Value& config, ctx.GetAlarm(), "string param CompressType is not valid", CompressTypeToString(defaultType), - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), diff --git a/core/compression/CompressorFactory.h b/core/pipeline/compression/CompressorFactory.h similarity index 93% rename from core/compression/CompressorFactory.h rename to core/pipeline/compression/CompressorFactory.h index 7e4e0049a5..dbdc826f16 100644 --- a/core/compression/CompressorFactory.h +++ b/core/pipeline/compression/CompressorFactory.h @@ -21,8 +21,8 @@ #include #include -#include "compression/CompressType.h" -#include "compression/Compressor.h" +#include "pipeline/compression/CompressType.h" +#include "pipeline/compression/Compressor.h" #include "pipeline/PipelineContext.h" namespace logtail { @@ -41,7 +41,7 @@ class CompressorFactory { std::unique_ptr Create(const Json::Value& config, const PipelineContext& ctx, - const std::string& pluginName, + const std::string& pluginType, CompressType defaultType); 
private: diff --git a/core/compression/LZ4Compressor.cpp b/core/pipeline/compression/LZ4Compressor.cpp similarity index 97% rename from core/compression/LZ4Compressor.cpp rename to core/pipeline/compression/LZ4Compressor.cpp index 292b3803d6..7063812610 100644 --- a/core/compression/LZ4Compressor.cpp +++ b/core/pipeline/compression/LZ4Compressor.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "compression/LZ4Compressor.h" +#include "pipeline/compression/LZ4Compressor.h" #include diff --git a/core/compression/LZ4Compressor.h b/core/pipeline/compression/LZ4Compressor.h similarity index 95% rename from core/compression/LZ4Compressor.h rename to core/pipeline/compression/LZ4Compressor.h index 17b0ea24b3..6a64ea2af6 100644 --- a/core/compression/LZ4Compressor.h +++ b/core/pipeline/compression/LZ4Compressor.h @@ -16,7 +16,7 @@ #pragma once -#include "compression/Compressor.h" +#include "pipeline/compression/Compressor.h" namespace logtail { diff --git a/core/compression/ZstdCompressor.cpp b/core/pipeline/compression/ZstdCompressor.cpp similarity index 97% rename from core/compression/ZstdCompressor.cpp rename to core/pipeline/compression/ZstdCompressor.cpp index d80cbc7232..6e47985b5e 100644 --- a/core/compression/ZstdCompressor.cpp +++ b/core/pipeline/compression/ZstdCompressor.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "compression/ZstdCompressor.h" +#include "pipeline/compression/ZstdCompressor.h" #include diff --git a/core/compression/ZstdCompressor.h b/core/pipeline/compression/ZstdCompressor.h similarity index 96% rename from core/compression/ZstdCompressor.h rename to core/pipeline/compression/ZstdCompressor.h index 8691c6e237..1e910a2b0d 100644 --- a/core/compression/ZstdCompressor.h +++ b/core/pipeline/compression/ZstdCompressor.h @@ -16,7 +16,7 @@ #pragma once -#include "compression/Compressor.h" +#include "pipeline/compression/Compressor.h" namespace logtail { diff --git a/core/sender/ConcurrencyLimiter.cpp b/core/pipeline/limiter/ConcurrencyLimiter.cpp similarity index 94% rename from core/sender/ConcurrencyLimiter.cpp rename to core/pipeline/limiter/ConcurrencyLimiter.cpp index 6941f58969..a7a8be80a6 100644 --- a/core/sender/ConcurrencyLimiter.cpp +++ b/core/pipeline/limiter/ConcurrencyLimiter.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "sender/ConcurrencyLimiter.h" +#include "pipeline/limiter/ConcurrencyLimiter.h" using namespace std; diff --git a/core/sender/ConcurrencyLimiter.h b/core/pipeline/limiter/ConcurrencyLimiter.h similarity index 100% rename from core/sender/ConcurrencyLimiter.h rename to core/pipeline/limiter/ConcurrencyLimiter.h diff --git a/core/sender/RateLimiter.cpp b/core/pipeline/limiter/RateLimiter.cpp similarity index 97% rename from core/sender/RateLimiter.cpp rename to core/pipeline/limiter/RateLimiter.cpp index 88a412c4e7..cd44906b2c 100644 --- a/core/sender/RateLimiter.cpp +++ b/core/pipeline/limiter/RateLimiter.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "sender/RateLimiter.h" +#include "pipeline/limiter/RateLimiter.h" #include "logger/Logger.h" // TODO: temporarily used diff --git a/core/sender/RateLimiter.h b/core/pipeline/limiter/RateLimiter.h similarity index 100% rename from core/sender/RateLimiter.h rename to core/pipeline/limiter/RateLimiter.h diff --git a/core/plugin/PluginRegistry.cpp b/core/pipeline/plugin/PluginRegistry.cpp similarity index 80% rename from core/plugin/PluginRegistry.cpp rename to core/pipeline/plugin/PluginRegistry.cpp index 6067f31f48..b831a2a37d 100644 --- a/core/plugin/PluginRegistry.cpp +++ b/core/pipeline/plugin/PluginRegistry.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "plugin/PluginRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" #include #include @@ -25,43 +25,43 @@ #include "app_config/AppConfig.h" #include "common/Flags.h" -#include "flusher/blackhole/FlusherBlackHole.h" -#include "flusher/sls/FlusherSLS.h" -#include "input/InputContainerStdio.h" -#include "input/InputFile.h" -#include "input/InputPrometheus.h" +#include "plugin/flusher/blackhole/FlusherBlackHole.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "plugin/input/InputContainerStdio.h" +#include "plugin/input/InputFile.h" +#include "plugin/input/InputPrometheus.h" #if defined(__linux__) && !defined(__ANDROID__) -#include "input/InputEBPFFileSecurity.h" -#include "input/InputEBPFNetworkObserver.h" -#include "input/InputEBPFNetworkSecurity.h" -#include "input/InputEBPFProcessSecurity.h" -#include "input/InputObserverNetwork.h" +#include "plugin/input/InputEBPFFileSecurity.h" +#include "plugin/input/InputEBPFNetworkObserver.h" +#include "plugin/input/InputEBPFNetworkSecurity.h" +#include "plugin/input/InputEBPFProcessSecurity.h" +#include "plugin/input/InputObserverNetwork.h" #ifdef __ENTERPRISE__ -#include "input/InputStream.h" +#include "plugin/input/InputStream.h" #endif #endif #include 
"logger/Logger.h" -#include "plugin/creator/CProcessor.h" -#include "plugin/creator/DynamicCProcessorCreator.h" -#include "plugin/creator/StaticFlusherCreator.h" -#include "plugin/creator/StaticInputCreator.h" -#include "plugin/creator/StaticProcessorCreator.h" -#include "processor/ProcessorDesensitizeNative.h" -#include "processor/ProcessorFilterNative.h" -#include "processor/ProcessorParseApsaraNative.h" -#include "processor/ProcessorParseDelimiterNative.h" -#include "processor/ProcessorParseJsonNative.h" -#include "processor/ProcessorParseRegexNative.h" -#include "processor/ProcessorParseTimestampNative.h" -#include "processor/inner/ProcessorPromParseMetricNative.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/inner/ProcessorParseContainerLogNative.h" -#include "processor/inner/ProcessorPromRelabelMetricNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" -#include "processor/inner/ProcessorTagNative.h" +#include "pipeline/plugin/creator/CProcessor.h" +#include "pipeline/plugin/creator/DynamicCProcessorCreator.h" +#include "pipeline/plugin/creator/StaticFlusherCreator.h" +#include "pipeline/plugin/creator/StaticInputCreator.h" +#include "pipeline/plugin/creator/StaticProcessorCreator.h" +#include "plugin/processor/ProcessorDesensitizeNative.h" +#include "plugin/processor/ProcessorFilterNative.h" +#include "plugin/processor/ProcessorParseApsaraNative.h" +#include "plugin/processor/ProcessorParseDelimiterNative.h" +#include "plugin/processor/ProcessorParseJsonNative.h" +#include "plugin/processor/ProcessorParseRegexNative.h" +#include "plugin/processor/ProcessorParseTimestampNative.h" +#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" +#include 
"plugin/processor/inner/ProcessorPromRelabelMetricNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "plugin/processor/inner/ProcessorTagNative.h" #if defined(__linux__) && !defined(__ANDROID__) && !defined(__EXCLUDE_SPL__) -#include "processor/ProcessorSPL.h" +#include "plugin/processor/ProcessorSPL.h" #endif @@ -172,13 +172,13 @@ void PluginRegistry::LoadDynamicPlugins(const set& plugins) { } string error; auto pluginDir = AppConfig::GetInstance()->GetProcessExecutionDir() + "/plugins"; - for (auto& pluginName : plugins) { + for (auto& pluginType : plugins) { DynamicLibLoader loader; - if (!loader.LoadDynLib(pluginName, error, pluginDir)) { - LOG_ERROR(sLogger, ("open plugin", pluginName)("error", error)); + if (!loader.LoadDynLib(pluginType, error, pluginDir)) { + LOG_ERROR(sLogger, ("open plugin", pluginType)("error", error)); continue; } - PluginCreator* creator = LoadProcessorPlugin(loader, pluginName); + PluginCreator* creator = LoadProcessorPlugin(loader, pluginType); if (creator) { RegisterProcessorCreator(creator); continue; @@ -198,7 +198,7 @@ void PluginRegistry::RegisterFlusherCreator(PluginCreator* creator) { RegisterCreator(FLUSHER_PLUGIN, creator); } -PluginCreator* PluginRegistry::LoadProcessorPlugin(DynamicLibLoader& loader, const string pluginName) { +PluginCreator* PluginRegistry::LoadProcessorPlugin(DynamicLibLoader& loader, const string pluginType) { string error; processor_interface_t* plugin = (processor_interface_t*)loader.LoadMethod("processor_interface", error); // if (!error.empty()) { @@ -213,7 +213,7 @@ PluginCreator* PluginRegistry::LoadProcessorPlugin(DynamicLibLoader& loader, con } if (plugin->version != PROCESSOR_INTERFACE_VERSION) { LOG_ERROR(sLogger, - ("load plugin", pluginName)("error", "plugin interface version mismatch")( + ("load plugin", pluginType)("error", "plugin interface version mismatch")( "expected", 
PROCESSOR_INTERFACE_VERSION)("actual", plugin->version)); return nullptr; } diff --git a/core/plugin/PluginRegistry.h b/core/pipeline/plugin/PluginRegistry.h similarity index 90% rename from core/plugin/PluginRegistry.h rename to core/pipeline/plugin/PluginRegistry.h index af4f49e9c5..22213d6c39 100644 --- a/core/plugin/PluginRegistry.h +++ b/core/pipeline/plugin/PluginRegistry.h @@ -22,12 +22,12 @@ #include #include "common/DynamicLibHelper.h" -#include "plugin/creator/PluginCreator.h" -#include "plugin/instance/FlusherInstance.h" -#include "plugin/instance/InputInstance.h" -#include "plugin/instance/PluginInstance.h" -#include "plugin/instance/ProcessorInstance.h" -#include "sink/SinkType.h" +#include "pipeline/plugin/creator/PluginCreator.h" +#include "pipeline/plugin/instance/FlusherInstance.h" +#include "pipeline/plugin/instance/InputInstance.h" +#include "pipeline/plugin/instance/PluginInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "runner/sink/SinkType.h" struct processor_interface_t; @@ -77,7 +77,7 @@ class PluginRegistry { void RegisterInputCreator(PluginCreator* creator); void RegisterProcessorCreator(PluginCreator* creator); void RegisterFlusherCreator(PluginCreator* creator); - PluginCreator* LoadProcessorPlugin(DynamicLibLoader& loader, const std::string pluginName); + PluginCreator* LoadProcessorPlugin(DynamicLibLoader& loader, const std::string pluginType); void RegisterCreator(PluginCat cat, PluginCreator* creator); std::unique_ptr Create(PluginCat cat, const std::string& name, const PluginInstance::PluginMeta& pluginMeta); diff --git a/core/plugin/creator/CProcessor.h b/core/pipeline/plugin/creator/CProcessor.h similarity index 100% rename from core/plugin/creator/CProcessor.h rename to core/pipeline/plugin/creator/CProcessor.h diff --git a/core/plugin/creator/DynamicCProcessorCreator.cpp b/core/pipeline/plugin/creator/DynamicCProcessorCreator.cpp similarity index 88% rename from 
core/plugin/creator/DynamicCProcessorCreator.cpp rename to core/pipeline/plugin/creator/DynamicCProcessorCreator.cpp index ab78eebd05..ef8c088d06 100644 --- a/core/plugin/creator/DynamicCProcessorCreator.cpp +++ b/core/pipeline/plugin/creator/DynamicCProcessorCreator.cpp @@ -14,11 +14,11 @@ * limitations under the License. */ -#include "plugin/creator/DynamicCProcessorCreator.h" +#include "pipeline/plugin/creator/DynamicCProcessorCreator.h" #include "common/DynamicLibHelper.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/DynamicCProcessorProxy.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/DynamicCProcessorProxy.h" namespace logtail { diff --git a/core/plugin/creator/DynamicCProcessorCreator.h b/core/pipeline/plugin/creator/DynamicCProcessorCreator.h similarity index 91% rename from core/plugin/creator/DynamicCProcessorCreator.h rename to core/pipeline/plugin/creator/DynamicCProcessorCreator.h index 628284d46e..68946ceeb8 100644 --- a/core/plugin/creator/DynamicCProcessorCreator.h +++ b/core/pipeline/plugin/creator/DynamicCProcessorCreator.h @@ -16,8 +16,8 @@ #pragma once -#include "plugin/creator/CProcessor.h" -#include "plugin/creator/PluginCreator.h" +#include "pipeline/plugin/creator/CProcessor.h" +#include "pipeline/plugin/creator/PluginCreator.h" namespace logtail { diff --git a/core/plugin/creator/PluginCreator.h b/core/pipeline/plugin/creator/PluginCreator.h similarity index 94% rename from core/plugin/creator/PluginCreator.h rename to core/pipeline/plugin/creator/PluginCreator.h index 6ae52e7464..6888927186 100644 --- a/core/plugin/creator/PluginCreator.h +++ b/core/pipeline/plugin/creator/PluginCreator.h @@ -19,7 +19,7 @@ #include #include -#include "plugin/instance/PluginInstance.h" +#include "pipeline/plugin/instance/PluginInstance.h" namespace logtail { diff --git a/core/plugin/creator/StaticFlusherCreator.h b/core/pipeline/plugin/creator/StaticFlusherCreator.h similarity index 90% rename 
from core/plugin/creator/StaticFlusherCreator.h rename to core/pipeline/plugin/creator/StaticFlusherCreator.h index 209be79b55..3c2c1f0eec 100644 --- a/core/plugin/creator/StaticFlusherCreator.h +++ b/core/pipeline/plugin/creator/StaticFlusherCreator.h @@ -16,8 +16,8 @@ #pragma once -#include "plugin/creator/PluginCreator.h" -#include "plugin/instance/FlusherInstance.h" +#include "pipeline/plugin/creator/PluginCreator.h" +#include "pipeline/plugin/instance/FlusherInstance.h" namespace logtail { diff --git a/core/plugin/creator/StaticInputCreator.h b/core/pipeline/plugin/creator/StaticInputCreator.h similarity index 90% rename from core/plugin/creator/StaticInputCreator.h rename to core/pipeline/plugin/creator/StaticInputCreator.h index 833af63729..89029d7c6f 100644 --- a/core/plugin/creator/StaticInputCreator.h +++ b/core/pipeline/plugin/creator/StaticInputCreator.h @@ -16,8 +16,8 @@ #pragma once -#include "plugin/creator/PluginCreator.h" -#include "plugin/instance/InputInstance.h" +#include "pipeline/plugin/creator/PluginCreator.h" +#include "pipeline/plugin/instance/InputInstance.h" namespace logtail { diff --git a/core/plugin/creator/StaticProcessorCreator.h b/core/pipeline/plugin/creator/StaticProcessorCreator.h similarity index 90% rename from core/plugin/creator/StaticProcessorCreator.h rename to core/pipeline/plugin/creator/StaticProcessorCreator.h index 9cb5197199..dffdffe9ed 100644 --- a/core/plugin/creator/StaticProcessorCreator.h +++ b/core/pipeline/plugin/creator/StaticProcessorCreator.h @@ -16,8 +16,8 @@ #pragma once -#include "plugin/creator/PluginCreator.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/creator/PluginCreator.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { diff --git a/core/plugin/instance/FlusherInstance.cpp b/core/pipeline/plugin/instance/FlusherInstance.cpp similarity index 96% rename from core/plugin/instance/FlusherInstance.cpp rename to 
core/pipeline/plugin/instance/FlusherInstance.cpp index 610d408198..a48b3a7f85 100644 --- a/core/plugin/instance/FlusherInstance.cpp +++ b/core/pipeline/plugin/instance/FlusherInstance.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "plugin/instance/FlusherInstance.h" +#include "pipeline/plugin/instance/FlusherInstance.h" #include "monitor/MetricConstants.h" diff --git a/core/plugin/instance/FlusherInstance.h b/core/pipeline/plugin/instance/FlusherInstance.h similarity index 92% rename from core/plugin/instance/FlusherInstance.h rename to core/pipeline/plugin/instance/FlusherInstance.h index f18c612ff1..2900a594df 100644 --- a/core/plugin/instance/FlusherInstance.h +++ b/core/pipeline/plugin/instance/FlusherInstance.h @@ -23,9 +23,9 @@ #include "models/PipelineEventGroup.h" #include "monitor/PluginMetricManager.h" #include "pipeline/PipelineContext.h" -#include "plugin/instance/PluginInstance.h" -#include "plugin/interface/Flusher.h" -#include "queue/QueueKey.h" +#include "pipeline/plugin/instance/PluginInstance.h" +#include "pipeline/plugin/interface/Flusher.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/plugin/instance/InputInstance.cpp b/core/pipeline/plugin/instance/InputInstance.cpp similarity index 95% rename from core/plugin/instance/InputInstance.cpp rename to core/pipeline/plugin/instance/InputInstance.cpp index 9fd00ff364..8b3dfffc31 100644 --- a/core/plugin/instance/InputInstance.cpp +++ b/core/pipeline/plugin/instance/InputInstance.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "plugin/instance/InputInstance.h" +#include "pipeline/plugin/instance/InputInstance.h" namespace logtail { bool InputInstance::Init(const Json::Value& config, diff --git a/core/plugin/instance/InputInstance.h b/core/pipeline/plugin/instance/InputInstance.h similarity index 94% rename from core/plugin/instance/InputInstance.h rename to core/pipeline/plugin/instance/InputInstance.h index 718f25000b..139f5b554b 100644 --- a/core/plugin/instance/InputInstance.h +++ b/core/pipeline/plugin/instance/InputInstance.h @@ -21,8 +21,8 @@ #include #include "pipeline/PipelineContext.h" -#include "plugin/instance/PluginInstance.h" -#include "plugin/interface/Input.h" +#include "pipeline/plugin/instance/PluginInstance.h" +#include "pipeline/plugin/interface/Input.h" namespace logtail { diff --git a/core/plugin/instance/PluginInstance.h b/core/pipeline/plugin/instance/PluginInstance.h similarity index 100% rename from core/plugin/instance/PluginInstance.h rename to core/pipeline/plugin/instance/PluginInstance.h diff --git a/core/plugin/instance/ProcessorInstance.cpp b/core/pipeline/plugin/instance/ProcessorInstance.cpp similarity index 97% rename from core/plugin/instance/ProcessorInstance.cpp rename to core/pipeline/plugin/instance/ProcessorInstance.cpp index 74fb309cdf..8790886cd4 100644 --- a/core/plugin/instance/ProcessorInstance.cpp +++ b/core/pipeline/plugin/instance/ProcessorInstance.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" #include diff --git a/core/plugin/instance/ProcessorInstance.h b/core/pipeline/plugin/instance/ProcessorInstance.h similarity index 95% rename from core/plugin/instance/ProcessorInstance.h rename to core/pipeline/plugin/instance/ProcessorInstance.h index e0a802c0c8..5f96756688 100644 --- a/core/plugin/instance/ProcessorInstance.h +++ b/core/pipeline/plugin/instance/ProcessorInstance.h @@ -23,8 +23,8 @@ #include "models/PipelineEventGroup.h" #include "monitor/LogtailMetric.h" #include "pipeline/PipelineContext.h" -#include "plugin/instance/PluginInstance.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/instance/PluginInstance.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/plugin/interface/Flusher.cpp b/core/pipeline/plugin/interface/Flusher.cpp similarity index 90% rename from core/plugin/interface/Flusher.cpp rename to core/pipeline/plugin/interface/Flusher.cpp index 43d5da9ac4..432b184fd2 100644 --- a/core/plugin/interface/Flusher.cpp +++ b/core/pipeline/plugin/interface/Flusher.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "plugin/interface/Flusher.h" +#include "pipeline/plugin/interface/Flusher.h" -#include "queue/QueueKeyManager.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SenderQueueManager.h" // TODO: temporarily used here #include "pipeline/PipelineManager.h" @@ -23,6 +23,11 @@ using namespace std; namespace logtail { +bool Flusher::Start() { + SenderQueueManager::GetInstance()->ReuseQueue(mQueueKey); + return true; +} + bool Flusher::Stop(bool isPipelineRemoving) { SenderQueueManager::GetInstance()->DeleteQueue(mQueueKey); return true; @@ -46,9 +51,9 @@ bool Flusher::PushToQueue(unique_ptr&& item, uint32_t retryTime } #endif - const string& str = QueueKeyManager::GetInstance()->GetName(mQueueKey); + const string& str = QueueKeyManager::GetInstance()->GetName(item->mQueueKey); for (size_t i = 0; i < retryTimes; ++i) { - int rst = SenderQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item)); + int rst = SenderQueueManager::GetInstance()->PushQueue(item->mQueueKey, std::move(item)); if (rst == 0) { return true; } diff --git a/core/plugin/interface/Flusher.h b/core/pipeline/plugin/interface/Flusher.h similarity index 89% rename from core/plugin/interface/Flusher.h rename to core/pipeline/plugin/interface/Flusher.h index 3d6a73cbd9..5df95b28b8 100644 --- a/core/plugin/interface/Flusher.h +++ b/core/pipeline/plugin/interface/Flusher.h @@ -22,10 +22,10 @@ #include #include "models/PipelineEventGroup.h" -#include "plugin/interface/Plugin.h" -#include "queue/QueueKey.h" -#include "queue/SenderQueueItem.h" -#include "sink/SinkType.h" +#include "pipeline/plugin/interface/Plugin.h" +#include "pipeline/queue/QueueKey.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "runner/sink/SinkType.h" namespace logtail { @@ -34,7 +34,7 @@ class Flusher : public Plugin { virtual ~Flusher() = default; virtual bool Init(const Json::Value& config, Json::Value& optionalGoPipeline) = 0; - virtual bool Start() { 
return true; } + virtual bool Start(); virtual bool Stop(bool isPipelineRemoving); virtual bool Send(PipelineEventGroup&& g) = 0; virtual bool Flush(size_t key) = 0; diff --git a/core/plugin/interface/HttpFlusher.h b/core/pipeline/plugin/interface/HttpFlusher.h similarity index 88% rename from core/plugin/interface/HttpFlusher.h rename to core/pipeline/plugin/interface/HttpFlusher.h index 8b3a26ea27..27b2bde2ff 100644 --- a/core/plugin/interface/HttpFlusher.h +++ b/core/pipeline/plugin/interface/HttpFlusher.h @@ -17,9 +17,9 @@ #pragma once #include "common/http/HttpResponse.h" -#include "plugin/interface/Flusher.h" -#include "queue/SenderQueueItem.h" -#include "sink/http/HttpSinkRequest.h" +#include "pipeline/plugin/interface/Flusher.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "runner/sink/http/HttpSinkRequest.h" namespace logtail { diff --git a/core/plugin/interface/Input.h b/core/pipeline/plugin/interface/Input.h similarity index 92% rename from core/plugin/interface/Input.h rename to core/pipeline/plugin/interface/Input.h index 91593cf310..98393aeb33 100644 --- a/core/plugin/interface/Input.h +++ b/core/pipeline/plugin/interface/Input.h @@ -21,8 +21,8 @@ #include #include -#include "plugin/instance/ProcessorInstance.h" -#include "plugin/interface/Plugin.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/interface/Plugin.h" namespace logtail { diff --git a/core/plugin/interface/Plugin.h b/core/pipeline/plugin/interface/Plugin.h similarity index 100% rename from core/plugin/interface/Plugin.h rename to core/pipeline/plugin/interface/Plugin.h diff --git a/core/plugin/interface/Processor.cpp b/core/pipeline/plugin/interface/Processor.cpp similarity index 94% rename from core/plugin/interface/Processor.cpp rename to core/pipeline/plugin/interface/Processor.cpp index 86f0c27435..22f710ba58 100644 --- a/core/plugin/interface/Processor.cpp +++ b/core/pipeline/plugin/interface/Processor.cpp @@ -12,7 +12,7 @@ // See the 
License for the specific language governing permissions and // limitations under the License. -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" using namespace std; diff --git a/core/plugin/interface/Processor.h b/core/pipeline/plugin/interface/Processor.h similarity index 96% rename from core/plugin/interface/Processor.h rename to core/pipeline/plugin/interface/Processor.h index b902e9827d..f41e88c471 100644 --- a/core/plugin/interface/Processor.h +++ b/core/pipeline/plugin/interface/Processor.h @@ -20,7 +20,7 @@ #include "models/PipelineEventGroup.h" #include "models/PipelineEventPtr.h" -#include "plugin/interface/Plugin.h" +#include "pipeline/plugin/interface/Plugin.h" namespace logtail { diff --git a/core/queue/BoundedProcessQueue.cpp b/core/pipeline/queue/BoundedProcessQueue.cpp similarity index 97% rename from core/queue/BoundedProcessQueue.cpp rename to core/pipeline/queue/BoundedProcessQueue.cpp index f29c8cf431..470d134476 100644 --- a/core/queue/BoundedProcessQueue.cpp +++ b/core/pipeline/queue/BoundedProcessQueue.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/BoundedProcessQueue.h" +#include "pipeline/queue/BoundedProcessQueue.h" using namespace std; diff --git a/core/queue/BoundedProcessQueue.h b/core/pipeline/queue/BoundedProcessQueue.h similarity index 95% rename from core/queue/BoundedProcessQueue.h rename to core/pipeline/queue/BoundedProcessQueue.h index 5617b4dd4f..8b65c17193 100644 --- a/core/queue/BoundedProcessQueue.h +++ b/core/pipeline/queue/BoundedProcessQueue.h @@ -22,8 +22,8 @@ #include #include "common/FeedbackInterface.h" -#include "queue/BoundedQueueInterface.h" -#include "queue/ProcessQueueInterface.h" +#include "pipeline/queue/BoundedQueueInterface.h" +#include "pipeline/queue/ProcessQueueInterface.h" namespace logtail { diff --git a/core/queue/BoundedQueueInterface.h b/core/pipeline/queue/BoundedQueueInterface.h similarity index 98% rename from core/queue/BoundedQueueInterface.h rename to core/pipeline/queue/BoundedQueueInterface.h index 52f548d757..7b2c0439a5 100644 --- a/core/queue/BoundedQueueInterface.h +++ b/core/pipeline/queue/BoundedQueueInterface.h @@ -16,7 +16,7 @@ #pragma once -#include "queue/QueueInterface.h" +#include "pipeline/queue/QueueInterface.h" namespace logtail { diff --git a/core/queue/BoundedSenderQueueInterface.cpp b/core/pipeline/queue/BoundedSenderQueueInterface.cpp similarity index 96% rename from core/queue/BoundedSenderQueueInterface.cpp rename to core/pipeline/queue/BoundedSenderQueueInterface.cpp index 9eabcc8d0c..85bb8b91a8 100644 --- a/core/queue/BoundedSenderQueueInterface.cpp +++ b/core/pipeline/queue/BoundedSenderQueueInterface.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/BoundedSenderQueueInterface.h" +#include "pipeline/queue/BoundedSenderQueueInterface.h" using namespace std; diff --git a/core/queue/BoundedSenderQueueInterface.h b/core/pipeline/queue/BoundedSenderQueueInterface.h similarity index 90% rename from core/queue/BoundedSenderQueueInterface.h rename to core/pipeline/queue/BoundedSenderQueueInterface.h index 7aefe1e1cd..526f7f93f9 100644 --- a/core/queue/BoundedSenderQueueInterface.h +++ b/core/pipeline/queue/BoundedSenderQueueInterface.h @@ -22,11 +22,11 @@ #include #include "common/FeedbackInterface.h" -#include "queue/BoundedQueueInterface.h" -#include "queue/QueueKey.h" -#include "queue/SenderQueueItem.h" -#include "sender/ConcurrencyLimiter.h" -#include "sender/RateLimiter.h" +#include "pipeline/queue/BoundedQueueInterface.h" +#include "pipeline/queue/QueueKey.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "pipeline/limiter/ConcurrencyLimiter.h" +#include "pipeline/limiter/RateLimiter.h" namespace logtail { diff --git a/core/queue/CircularProcessQueue.cpp b/core/pipeline/queue/CircularProcessQueue.cpp similarity index 95% rename from core/queue/CircularProcessQueue.cpp rename to core/pipeline/queue/CircularProcessQueue.cpp index af219add94..5962bc6c05 100644 --- a/core/queue/CircularProcessQueue.cpp +++ b/core/pipeline/queue/CircularProcessQueue.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/CircularProcessQueue.h" +#include "pipeline/queue/CircularProcessQueue.h" #include "logger/Logger.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/QueueKeyManager.h" using namespace std; diff --git a/core/queue/CircularProcessQueue.h b/core/pipeline/queue/CircularProcessQueue.h similarity index 94% rename from core/queue/CircularProcessQueue.h rename to core/pipeline/queue/CircularProcessQueue.h index 8acfcdbff3..45f50a1959 100644 --- a/core/queue/CircularProcessQueue.h +++ b/core/pipeline/queue/CircularProcessQueue.h @@ -20,8 +20,8 @@ #include #include -#include "queue/ProcessQueueInterface.h" -#include "queue/QueueInterface.h" +#include "pipeline/queue/ProcessQueueInterface.h" +#include "pipeline/queue/QueueInterface.h" namespace logtail { diff --git a/core/queue/ExactlyOnceQueueManager.cpp b/core/pipeline/queue/ExactlyOnceQueueManager.cpp similarity index 97% rename from core/queue/ExactlyOnceQueueManager.cpp rename to core/pipeline/queue/ExactlyOnceQueueManager.cpp index 299a76f0e1..79433b08de 100644 --- a/core/queue/ExactlyOnceQueueManager.cpp +++ b/core/pipeline/queue/ExactlyOnceQueueManager.cpp @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" #include "common/Flags.h" #include "common/TimeUtil.h" -#include "input/InputFeedbackInterfaceRegistry.h" -#include "input/InputFile.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" +#include "plugin/input/InputFile.h" #include "logger/Logger.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" DEFINE_FLAG_INT32(logtail_queue_gc_threshold_sec, "2min", 2 * 60); DEFINE_FLAG_INT64(logtail_queue_max_used_time_per_round_in_msec, "500ms", 500); diff --git a/core/queue/ExactlyOnceQueueManager.h b/core/pipeline/queue/ExactlyOnceQueueManager.h similarity index 91% rename from core/queue/ExactlyOnceQueueManager.h rename to core/pipeline/queue/ExactlyOnceQueueManager.h index 93a2c367b4..62e4adad3c 100644 --- a/core/queue/ExactlyOnceQueueManager.h +++ b/core/pipeline/queue/ExactlyOnceQueueManager.h @@ -27,13 +27,13 @@ #include "checkpoint/RangeCheckpoint.h" #include "common/FeedbackInterface.h" -#include "queue/BoundedProcessQueue.h" -#include "queue/ExactlyOnceSenderQueue.h" -#include "queue/ProcessQueueItem.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKey.h" -#include "queue/QueueParam.h" -#include "queue/SenderQueueItem.h" +#include "pipeline/queue/BoundedProcessQueue.h" +#include "pipeline/queue/ExactlyOnceSenderQueue.h" +#include "pipeline/queue/ProcessQueueItem.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKey.h" +#include "pipeline/queue/QueueParam.h" +#include "pipeline/queue/SenderQueueItem.h" namespace logtail { diff --git a/core/queue/ExactlyOnceSenderQueue.cpp b/core/pipeline/queue/ExactlyOnceSenderQueue.cpp similarity index 96% rename from core/queue/ExactlyOnceSenderQueue.cpp rename to core/pipeline/queue/ExactlyOnceSenderQueue.cpp index 1a2bca236b..7402329d68 100644 --- 
a/core/queue/ExactlyOnceSenderQueue.cpp +++ b/core/pipeline/queue/ExactlyOnceSenderQueue.cpp @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "queue/ExactlyOnceSenderQueue.h" +#include "pipeline/queue/ExactlyOnceSenderQueue.h" #include -#include "flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/FlusherSLS.h" #include "logger/Logger.h" -#include "queue/SLSSenderQueueItem.h" +#include "pipeline/queue/SLSSenderQueueItem.h" using namespace std; diff --git a/core/queue/ExactlyOnceSenderQueue.h b/core/pipeline/queue/ExactlyOnceSenderQueue.h similarity index 93% rename from core/queue/ExactlyOnceSenderQueue.h rename to core/pipeline/queue/ExactlyOnceSenderQueue.h index 8617b3fd39..6f5cb3ada0 100644 --- a/core/queue/ExactlyOnceSenderQueue.h +++ b/core/pipeline/queue/ExactlyOnceSenderQueue.h @@ -21,9 +21,9 @@ #include "checkpoint/RangeCheckpoint.h" #include "logger/Logger.h" -#include "queue/BoundedSenderQueueInterface.h" -#include "queue/QueueKey.h" -#include "queue/SenderQueueItem.h" +#include "pipeline/queue/BoundedSenderQueueInterface.h" +#include "pipeline/queue/QueueKey.h" +#include "pipeline/queue/SenderQueueItem.h" namespace logtail { diff --git a/core/queue/ProcessQueueInterface.cpp b/core/pipeline/queue/ProcessQueueInterface.cpp similarity index 92% rename from core/queue/ProcessQueueInterface.cpp rename to core/pipeline/queue/ProcessQueueInterface.cpp index 561c536728..afc78620ea 100644 --- a/core/queue/ProcessQueueInterface.cpp +++ b/core/pipeline/queue/ProcessQueueInterface.cpp @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/ProcessQueueInterface.h" +#include "pipeline/queue/ProcessQueueInterface.h" -#include "queue/BoundedSenderQueueInterface.h" +#include "pipeline/queue/BoundedSenderQueueInterface.h" using namespace std; diff --git a/core/queue/ProcessQueueInterface.h b/core/pipeline/queue/ProcessQueueInterface.h similarity index 96% rename from core/queue/ProcessQueueInterface.h rename to core/pipeline/queue/ProcessQueueInterface.h index 72cbed981f..363bc55f27 100644 --- a/core/queue/ProcessQueueInterface.h +++ b/core/pipeline/queue/ProcessQueueInterface.h @@ -21,8 +21,8 @@ #include #include -#include "queue/ProcessQueueItem.h" -#include "queue/QueueInterface.h" +#include "pipeline/queue/ProcessQueueItem.h" +#include "pipeline/queue/QueueInterface.h" namespace logtail { diff --git a/core/queue/ProcessQueueItem.h b/core/pipeline/queue/ProcessQueueItem.h similarity index 100% rename from core/queue/ProcessQueueItem.h rename to core/pipeline/queue/ProcessQueueItem.h diff --git a/core/queue/ProcessQueueManager.cpp b/core/pipeline/queue/ProcessQueueManager.cpp similarity index 98% rename from core/queue/ProcessQueueManager.cpp rename to core/pipeline/queue/ProcessQueueManager.cpp index 65561d6195..0fc012ec24 100644 --- a/core/queue/ProcessQueueManager.cpp +++ b/core/pipeline/queue/ProcessQueueManager.cpp @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/ProcessQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" #include "common/Flags.h" -#include "queue/BoundedProcessQueue.h" -#include "queue/CircularProcessQueue.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/BoundedProcessQueue.h" +#include "pipeline/queue/CircularProcessQueue.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" DEFINE_FLAG_INT32(bounded_process_queue_capacity, "", 15); diff --git a/core/queue/ProcessQueueManager.h b/core/pipeline/queue/ProcessQueueManager.h similarity index 93% rename from core/queue/ProcessQueueManager.h rename to core/pipeline/queue/ProcessQueueManager.h index 070afa4b2b..e3d6b464b3 100644 --- a/core/queue/ProcessQueueManager.h +++ b/core/pipeline/queue/ProcessQueueManager.h @@ -26,11 +26,11 @@ #include #include "common/FeedbackInterface.h" -#include "queue/BoundedSenderQueueInterface.h" -#include "queue/ProcessQueueInterface.h" -#include "queue/ProcessQueueItem.h" -#include "queue/QueueKey.h" -#include "queue/QueueParam.h" +#include "pipeline/queue/BoundedSenderQueueInterface.h" +#include "pipeline/queue/ProcessQueueInterface.h" +#include "pipeline/queue/ProcessQueueItem.h" +#include "pipeline/queue/QueueKey.h" +#include "pipeline/queue/QueueParam.h" namespace logtail { diff --git a/core/queue/QueueInterface.h b/core/pipeline/queue/QueueInterface.h similarity index 97% rename from core/queue/QueueInterface.h rename to core/pipeline/queue/QueueInterface.h index 19755e98af..12346f98d9 100644 --- a/core/queue/QueueInterface.h +++ b/core/pipeline/queue/QueueInterface.h @@ -16,7 +16,7 @@ #pragma once -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/queue/QueueKey.h b/core/pipeline/queue/QueueKey.h similarity index 100% rename from core/queue/QueueKey.h rename to core/pipeline/queue/QueueKey.h diff --git a/core/queue/QueueKeyManager.cpp 
b/core/pipeline/queue/QueueKeyManager.cpp similarity index 97% rename from core/queue/QueueKeyManager.cpp rename to core/pipeline/queue/QueueKeyManager.cpp index 06d6832f02..8bd2a16d85 100644 --- a/core/queue/QueueKeyManager.cpp +++ b/core/pipeline/queue/QueueKeyManager.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/QueueKeyManager.h" using namespace std; diff --git a/core/queue/QueueKeyManager.h b/core/pipeline/queue/QueueKeyManager.h similarity index 97% rename from core/queue/QueueKeyManager.h rename to core/pipeline/queue/QueueKeyManager.h index a9abd39762..8ec5b1737f 100644 --- a/core/queue/QueueKeyManager.h +++ b/core/pipeline/queue/QueueKeyManager.h @@ -20,7 +20,7 @@ #include #include -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/queue/QueueParam.h b/core/pipeline/queue/QueueParam.h similarity index 100% rename from core/queue/QueueParam.h rename to core/pipeline/queue/QueueParam.h diff --git a/core/queue/SLSSenderQueueItem.h b/core/pipeline/queue/SLSSenderQueueItem.h similarity index 97% rename from core/queue/SLSSenderQueueItem.h rename to core/pipeline/queue/SLSSenderQueueItem.h index 920b7d1593..1d34efe40f 100644 --- a/core/queue/SLSSenderQueueItem.h +++ b/core/pipeline/queue/SLSSenderQueueItem.h @@ -17,7 +17,7 @@ #pragma once #include "checkpoint/RangeCheckpoint.h" -#include "queue/SenderQueueItem.h" +#include "pipeline/queue/SenderQueueItem.h" namespace logtail { diff --git a/core/queue/SenderQueue.cpp b/core/pipeline/queue/SenderQueue.cpp similarity index 98% rename from core/queue/SenderQueue.cpp rename to core/pipeline/queue/SenderQueue.cpp index aaa88bbd79..ad4d837aba 100644 --- a/core/queue/SenderQueue.cpp +++ b/core/pipeline/queue/SenderQueue.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations 
under the License. -#include "queue/SenderQueue.h" +#include "pipeline/queue/SenderQueue.h" #include "logger/Logger.h" diff --git a/core/queue/SenderQueue.h b/core/pipeline/queue/SenderQueue.h similarity index 91% rename from core/queue/SenderQueue.h rename to core/pipeline/queue/SenderQueue.h index 4d7e1f330e..310c981d96 100644 --- a/core/queue/SenderQueue.h +++ b/core/pipeline/queue/SenderQueue.h @@ -19,9 +19,9 @@ #include #include -#include "queue/QueueKey.h" -#include "queue/BoundedSenderQueueInterface.h" -#include "queue/SenderQueueItem.h" +#include "pipeline/queue/QueueKey.h" +#include "pipeline/queue/BoundedSenderQueueInterface.h" +#include "pipeline/queue/SenderQueueItem.h" namespace logtail { diff --git a/core/queue/SenderQueueItem.h b/core/pipeline/queue/SenderQueueItem.h similarity index 98% rename from core/queue/SenderQueueItem.h rename to core/pipeline/queue/SenderQueueItem.h index 70bc984edf..9e3b83c979 100644 --- a/core/queue/SenderQueueItem.h +++ b/core/pipeline/queue/SenderQueueItem.h @@ -21,7 +21,7 @@ #include #include -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/queue/SenderQueueManager.cpp b/core/pipeline/queue/SenderQueueManager.cpp similarity index 84% rename from core/queue/SenderQueueManager.cpp rename to core/pipeline/queue/SenderQueueManager.cpp index a807d36c6a..20987e0b3f 100644 --- a/core/queue/SenderQueueManager.cpp +++ b/core/pipeline/queue/SenderQueueManager.cpp @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/SenderQueueManager.h" +#include "pipeline/queue/SenderQueueManager.h" #include "common/Flags.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" DEFINE_FLAG_INT32(sender_queue_gc_threshold_sec, "30s", 30); DEFINE_FLAG_INT32(sender_queue_capacity, "", 10); @@ -31,22 +31,16 @@ SenderQueueManager::SenderQueueManager() : mQueueParam(INT32_FLAG(sender_queue_c bool SenderQueueManager::CreateQueue(QueueKey key, vector>&& concurrencyLimiters, uint32_t maxRate) { - { - lock_guard lock(mGCMux); - mQueueDeletionTimeMap.erase(key); - } - { - lock_guard lock(mQueueMux); - auto iter = mQueues.find(key); - if (iter == mQueues.end()) { - mQueues.try_emplace( - key, mQueueParam.GetCapacity(), mQueueParam.GetLowWatermark(), mQueueParam.GetHighWatermark(), key); - iter = mQueues.find(key); - } - iter->second.SetConcurrencyLimiters(std::move(concurrencyLimiters)); - iter->second.SetRateLimiter(maxRate); - return true; + lock_guard lock(mQueueMux); + auto iter = mQueues.find(key); + if (iter == mQueues.end()) { + mQueues.try_emplace( + key, mQueueParam.GetCapacity(), mQueueParam.GetLowWatermark(), mQueueParam.GetHighWatermark(), key); + iter = mQueues.find(key); } + iter->second.SetConcurrencyLimiters(std::move(concurrencyLimiters)); + iter->second.SetRateLimiter(maxRate); + return true; } SenderQueue* SenderQueueManager::GetQueue(QueueKey key) { @@ -76,6 +70,16 @@ bool SenderQueueManager::DeleteQueue(QueueKey key) { return true; } +bool SenderQueueManager::ReuseQueue(QueueKey key) { + lock_guard lock(mGCMux); + auto iter = mQueueDeletionTimeMap.find(key); + if (iter == mQueueDeletionTimeMap.end()) { + return false; + } + mQueueDeletionTimeMap.erase(iter); + return true; +} + int SenderQueueManager::PushQueue(QueueKey key, unique_ptr&& item) { { lock_guard lock(mQueueMux); @@ -191,6 +195,11 @@ void SenderQueueManager::Clear() { 
mQueues.clear(); mQueueDeletionTimeMap.clear(); } + +bool SenderQueueManager::IsQueueMarkedDeleted(QueueKey key) { + lock_guard lock(mGCMux); + return mQueueDeletionTimeMap.find(key) != mQueueDeletionTimeMap.end(); +} #endif } // namespace logtail diff --git a/core/queue/SenderQueueManager.h b/core/pipeline/queue/SenderQueueManager.h similarity index 90% rename from core/queue/SenderQueueManager.h rename to core/pipeline/queue/SenderQueueManager.h index e7d6a108e0..08b5508794 100644 --- a/core/queue/SenderQueueManager.h +++ b/core/pipeline/queue/SenderQueueManager.h @@ -24,11 +24,11 @@ #include #include "common/FeedbackInterface.h" -#include "queue/QueueParam.h" -#include "queue/SenderQueue.h" -#include "queue/SenderQueueItem.h" -#include "sender/ConcurrencyLimiter.h" -#include "sender/RateLimiter.h" +#include "pipeline/queue/QueueParam.h" +#include "pipeline/queue/SenderQueue.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "pipeline/limiter/ConcurrencyLimiter.h" +#include "pipeline/limiter/RateLimiter.h" namespace logtail { @@ -52,6 +52,7 @@ class SenderQueueManager : public FeedbackInterface { uint32_t maxRate = 0); SenderQueue* GetQueue(QueueKey key); bool DeleteQueue(QueueKey key); + bool ReuseQueue(QueueKey key); // 0: success, 1: queue is full, 2: queue not found int PushQueue(QueueKey key, std::unique_ptr&& item); void GetAllAvailableItems(std::vector& items, bool withLimits = true); @@ -67,6 +68,7 @@ class SenderQueueManager : public FeedbackInterface { #ifdef APSARA_UNIT_TEST_MAIN void Clear(); + bool IsQueueMarkedDeleted(QueueKey key); #endif private: diff --git a/core/route/Condition.cpp b/core/pipeline/route/Condition.cpp similarity index 99% rename from core/route/Condition.cpp rename to core/pipeline/route/Condition.cpp index 4862348581..392963da02 100644 --- a/core/route/Condition.cpp +++ b/core/pipeline/route/Condition.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the 
License. -#include "route/Condition.h" +#include "pipeline/route/Condition.h" #include "common/ParamExtractor.h" diff --git a/core/route/Condition.h b/core/pipeline/route/Condition.h similarity index 100% rename from core/route/Condition.h rename to core/pipeline/route/Condition.h diff --git a/core/route/Router.cpp b/core/pipeline/route/Router.cpp similarity index 94% rename from core/route/Router.cpp rename to core/pipeline/route/Router.cpp index 118e0b8d11..e9c4a817cd 100644 --- a/core/route/Router.cpp +++ b/core/pipeline/route/Router.cpp @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "route/Router.h" +#include "pipeline/route/Router.h" #include "common/ParamExtractor.h" #include "pipeline/Pipeline.h" -#include "plugin/interface/Flusher.h" +#include "pipeline/plugin/interface/Flusher.h" using namespace std; diff --git a/core/route/Router.h b/core/pipeline/route/Router.h similarity index 96% rename from core/route/Router.h rename to core/pipeline/route/Router.h index 9a72ca12ac..c46dedca6e 100644 --- a/core/route/Router.h +++ b/core/pipeline/route/Router.h @@ -22,7 +22,7 @@ #include #include "models/PipelineEventGroup.h" -#include "route/Condition.h" +#include "pipeline/route/Condition.h" namespace logtail { diff --git a/core/serializer/SLSSerializer.cpp b/core/pipeline/serializer/SLSSerializer.cpp similarity index 97% rename from core/serializer/SLSSerializer.cpp rename to core/pipeline/serializer/SLSSerializer.cpp index dfad350ab4..600e709f68 100644 --- a/core/serializer/SLSSerializer.cpp +++ b/core/pipeline/serializer/SLSSerializer.cpp @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "serializer/SLSSerializer.h" +#include "pipeline/serializer/SLSSerializer.h" #include "application/Application.h" #include "common/Flags.h" #include "common/TimeUtil.h" -#include "compression/CompressType.h" -#include "flusher/sls/FlusherSLS.h" +#include "pipeline/compression/CompressType.h" +#include "plugin/flusher/sls/FlusherSLS.h" DEFINE_FLAG_INT32(max_send_log_group_size, "bytes", 10 * 1024 * 1024); diff --git a/core/serializer/SLSSerializer.h b/core/pipeline/serializer/SLSSerializer.h similarity index 97% rename from core/serializer/SLSSerializer.h rename to core/pipeline/serializer/SLSSerializer.h index 319c08127f..f19d0f8e3a 100644 --- a/core/serializer/SLSSerializer.h +++ b/core/pipeline/serializer/SLSSerializer.h @@ -19,7 +19,7 @@ #include #include -#include "serializer/Serializer.h" +#include "pipeline/serializer/Serializer.h" namespace logtail { diff --git a/core/serializer/Serializer.h b/core/pipeline/serializer/Serializer.h similarity index 93% rename from core/serializer/Serializer.h rename to core/pipeline/serializer/Serializer.h index 53f9507e02..968f882a91 100644 --- a/core/serializer/Serializer.h +++ b/core/pipeline/serializer/Serializer.h @@ -18,9 +18,9 @@ #include -#include "batch/BatchedEvents.h" +#include "pipeline/batch/BatchedEvents.h" #include "models/PipelineEventPtr.h" -#include "plugin/interface/Flusher.h" +#include "pipeline/plugin/interface/Flusher.h" namespace logtail { diff --git a/core/flusher/blackhole/FlusherBlackHole.cpp b/core/plugin/flusher/blackhole/FlusherBlackHole.cpp similarity index 91% rename from core/flusher/blackhole/FlusherBlackHole.cpp rename to core/plugin/flusher/blackhole/FlusherBlackHole.cpp index 71e3b638bf..515beb0dd4 100644 --- a/core/flusher/blackhole/FlusherBlackHole.cpp +++ b/core/plugin/flusher/blackhole/FlusherBlackHole.cpp @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/blackhole/FlusherBlackHole.h" +#include "plugin/flusher/blackhole/FlusherBlackHole.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/queue/SenderQueueManager.h" using namespace std; diff --git a/core/flusher/blackhole/FlusherBlackHole.h b/core/plugin/flusher/blackhole/FlusherBlackHole.h similarity index 95% rename from core/flusher/blackhole/FlusherBlackHole.h rename to core/plugin/flusher/blackhole/FlusherBlackHole.h index 4ff9317aec..d0a705cf71 100644 --- a/core/flusher/blackhole/FlusherBlackHole.h +++ b/core/plugin/flusher/blackhole/FlusherBlackHole.h @@ -16,7 +16,7 @@ #pragma once -#include "plugin/interface/Flusher.h" +#include "pipeline/plugin/interface/Flusher.h" namespace logtail { diff --git a/core/flusher/flusher.cmake b/core/plugin/flusher/flusher.cmake similarity index 80% rename from core/flusher/flusher.cmake rename to core/plugin/flusher/flusher.cmake index 7d04d8ac96..7243c1ca25 100644 --- a/core/flusher/flusher.cmake +++ b/core/plugin/flusher/flusher.cmake @@ -17,7 +17,7 @@ include_directories(flusher) # Add source files -file(GLOB_RECURSE THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/flusher/*.c ${CMAKE_SOURCE_DIR}/flusher/*.cc ${CMAKE_SOURCE_DIR}/flusher/*.cpp ${CMAKE_SOURCE_DIR}/flusher/*.h) +file(GLOB_RECURSE THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/flusher/*.c ${CMAKE_SOURCE_DIR}/plugin/flusher/*.cc ${CMAKE_SOURCE_DIR}/plugin/flusher/*.cpp ${CMAKE_SOURCE_DIR}/plugin/flusher/*.h) # Set source files to parent set(PLUGIN_SOURCE_FILES_CORE ${PLUGIN_SOURCE_FILES_CORE} ${THIS_SOURCE_FILES_LIST}) diff --git a/core/flusher/links.cmake b/core/plugin/flusher/links.cmake similarity index 100% rename from core/flusher/links.cmake rename to core/plugin/flusher/links.cmake diff --git a/core/flusher/sls/DiskBufferWriter.cpp b/core/plugin/flusher/sls/DiskBufferWriter.cpp similarity index 99% rename from core/flusher/sls/DiskBufferWriter.cpp rename to core/plugin/flusher/sls/DiskBufferWriter.cpp index 968f91b7ac..49bb2789d0 
100644 --- a/core/flusher/sls/DiskBufferWriter.cpp +++ b/core/plugin/flusher/sls/DiskBufferWriter.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "flusher/sls/DiskBufferWriter.h" +#include "plugin/flusher/sls/DiskBufferWriter.h" #include "app_config/AppConfig.h" #include "application/Application.h" @@ -22,16 +22,16 @@ #include "common/FileSystemUtil.h" #include "common/RuntimeUtil.h" #include "common/StringTools.h" -#include "flusher/sls/FlusherSLS.h" -#include "flusher/sls/SLSClientManager.h" -#include "log_pb/sls_logs.pb.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/SLSClientManager.h" +#include "protobuf/sls/sls_logs.pb.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" #include "profile_sender/ProfileSender.h" -#include "queue/QueueKeyManager.h" -#include "queue/SLSSenderQueueItem.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SLSSenderQueueItem.h" #include "sdk/Exception.h" -#include "sender/RateLimiter.h" +#include "pipeline/limiter/RateLimiter.h" #include "sls_control/SLSControl.h" DEFINE_FLAG_INT32(write_secondary_wait_timeout, "interval of dump seconary buffer from memory to file, seconds", 2); diff --git a/core/flusher/sls/DiskBufferWriter.h b/core/plugin/flusher/sls/DiskBufferWriter.h similarity index 96% rename from core/flusher/sls/DiskBufferWriter.h rename to core/plugin/flusher/sls/DiskBufferWriter.h index e5b6afe1a6..d3c1a391fc 100644 --- a/core/flusher/sls/DiskBufferWriter.h +++ b/core/plugin/flusher/sls/DiskBufferWriter.h @@ -24,9 +24,9 @@ #include #include "common/SafeQueue.h" -#include "flusher/sls/SendResult.h" -#include "log_pb/logtail_buffer_meta.pb.h" -#include "queue/SenderQueueItem.h" +#include "plugin/flusher/sls/SendResult.h" +#include "protobuf/sls/logtail_buffer_meta.pb.h" +#include "pipeline/queue/SenderQueueItem.h" #include "sdk/Client.h" namespace logtail { diff --git 
a/core/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp similarity index 97% rename from core/flusher/sls/FlusherSLS.cpp rename to core/plugin/flusher/sls/FlusherSLS.cpp index 0e16a394f3..797739ef2f 100644 --- a/core/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -12,33 +12,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/FlusherSLS.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif #include "app_config/AppConfig.h" -#include "batch/FlushStrategy.h" +#include "pipeline/batch/FlushStrategy.h" #include "common/EndpointUtil.h" #include "common/HashUtil.h" #include "common/LogtailCommonFlags.h" #include "common/ParamExtractor.h" #include "common/TimeUtil.h" -#include "compression/CompressorFactory.h" -#include "flusher/sls/PackIdManager.h" -#include "flusher/sls/SLSClientManager.h" -#include "flusher/sls/SLSResponse.h" -#include "flusher/sls/SendResult.h" +#include "pipeline/compression/CompressorFactory.h" +#include "plugin/flusher/sls/PackIdManager.h" +#include "plugin/flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/SLSResponse.h" +#include "plugin/flusher/sls/SendResult.h" #include "pipeline/Pipeline.h" #include "profile_sender/ProfileSender.h" -#include "queue/QueueKeyManager.h" -#include "queue/SLSSenderQueueItem.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SLSSenderQueueItem.h" +#include "pipeline/queue/SenderQueueManager.h" #include "sdk/Common.h" -#include "sender/FlusherRunner.h" +#include "runner/FlusherRunner.h" #include "sls_control/SLSControl.h" // TODO: temporarily used here -#include "flusher/sls/DiskBufferWriter.h" +#include "plugin/flusher/sls/DiskBufferWriter.h" #include "pipeline/PipelineManager.h" using namespace std; @@ -494,6 +494,8 @@ bool 
FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline } bool FlusherSLS::Start() { + Flusher::Start(); + InitResource(); IncreaseProjectReferenceCnt(mProject); @@ -815,10 +817,18 @@ bool FlusherSLS::Send(string&& data, const string& shardHashKey, const string& l } else { compressedData = data; } + + QueueKey key = mQueueKey; + if (!HasContext()) { + key = QueueKeyManager::GetInstance()->GetKey(mProject + "-" + mLogstore); + if (SenderQueueManager::GetInstance()->GetQueue(key) == nullptr) { + SenderQueueManager::GetInstance()->CreateQueue(key, vector>()); + } + } return Flusher::PushToQueue(make_unique(std::move(compressedData), data.size(), this, - mQueueKey, + key, logstore.empty() ? mLogstore : logstore, RawDataType::EVENT_GROUP, shardHashKey)); @@ -833,7 +843,7 @@ void FlusherSLS::GenerateGoPlugin(const Json::Value& config, Json::Value& res) c } if (!detail.empty()) { Json::Value plugin(Json::objectValue); - plugin["type"] = "flusher_sls"; + plugin["type"] = Pipeline::GenPluginTypeWithID("flusher_sls", mContext->GetPipeline().GetNowPluginID()); plugin["detail"] = detail; res["flushers"].append(plugin); } diff --git a/core/flusher/sls/FlusherSLS.h b/core/plugin/flusher/sls/FlusherSLS.h similarity index 93% rename from core/flusher/sls/FlusherSLS.h rename to core/plugin/flusher/sls/FlusherSLS.h index c5ba6bbb07..2840b94fe4 100644 --- a/core/flusher/sls/FlusherSLS.h +++ b/core/plugin/flusher/sls/FlusherSLS.h @@ -24,20 +24,17 @@ #include #include -#include "batch/BatchStatus.h" -#include "batch/Batcher.h" -#include "compression/Compressor.h" +#include "pipeline/batch/BatchStatus.h" +#include "pipeline/batch/Batcher.h" +#include "pipeline/compression/Compressor.h" #include "models/PipelineEventGroup.h" -#include "plugin/interface/HttpFlusher.h" -#include "sender/ConcurrencyLimiter.h" -#include "serializer/SLSSerializer.h" +#include "pipeline/plugin/interface/HttpFlusher.h" +#include "pipeline/limiter/ConcurrencyLimiter.h" +#include 
"pipeline/serializer/SLSSerializer.h" namespace logtail { class FlusherSLS : public HttpFlusher { - // TODO: temporarily used - friend class ProfileSender; - public: enum class TelemetryType { LOG, METRIC }; @@ -85,6 +82,9 @@ class FlusherSLS : public HttpFlusher { uint32_t mMaxSendRate = 0; // preserved only for exactly once uint32_t mFlowControlExpireTime = 0; + // TODO: temporarily public for profile + std::unique_ptr mCompressor; + private: static const std::unordered_set sNativeParam; @@ -124,7 +124,6 @@ class FlusherSLS : public HttpFlusher { Batcher mBatcher; std::unique_ptr mGroupSerializer; std::unique_ptr>> mGroupListSerializer; - std::unique_ptr mCompressor; #ifdef APSARA_UNIT_TEST_MAIN friend class FlusherSLSUnittest; diff --git a/core/flusher/sls/PackIdManager.cpp b/core/plugin/flusher/sls/PackIdManager.cpp similarity index 96% rename from core/flusher/sls/PackIdManager.cpp rename to core/plugin/flusher/sls/PackIdManager.cpp index e5ce352e47..16bfe7e7fd 100644 --- a/core/flusher/sls/PackIdManager.cpp +++ b/core/plugin/flusher/sls/PackIdManager.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "flusher/sls/PackIdManager.h" +#include "plugin/flusher/sls/PackIdManager.h" using namespace std; diff --git a/core/flusher/sls/PackIdManager.h b/core/plugin/flusher/sls/PackIdManager.h similarity index 100% rename from core/flusher/sls/PackIdManager.h rename to core/plugin/flusher/sls/PackIdManager.h diff --git a/core/flusher/sls/SLSClientManager.cpp b/core/plugin/flusher/sls/SLSClientManager.cpp similarity index 99% rename from core/flusher/sls/SLSClientManager.cpp rename to core/plugin/flusher/sls/SLSClientManager.cpp index 00cfe7e3e2..f729bbeae4 100644 --- a/core/flusher/sls/SLSClientManager.cpp +++ b/core/plugin/flusher/sls/SLSClientManager.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/SLSClientManager.h" #include "app_config/AppConfig.h" #include "common/EndpointUtil.h" @@ -20,8 +20,8 @@ #include "common/LogtailCommonFlags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "flusher/sls/FlusherSLS.h" -#include "flusher/sls/SendResult.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/SendResult.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "sdk/Exception.h" diff --git a/core/flusher/sls/SLSClientManager.h b/core/plugin/flusher/sls/SLSClientManager.h similarity index 100% rename from core/flusher/sls/SLSClientManager.h rename to core/plugin/flusher/sls/SLSClientManager.h diff --git a/core/flusher/sls/SLSResponse.cpp b/core/plugin/flusher/sls/SLSResponse.cpp similarity index 98% rename from core/flusher/sls/SLSResponse.cpp rename to core/plugin/flusher/sls/SLSResponse.cpp index ca433b0c2a..9b5de72f59 100644 --- a/core/flusher/sls/SLSResponse.cpp +++ b/core/plugin/flusher/sls/SLSResponse.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "flusher/sls/SLSResponse.h" +#include "plugin/flusher/sls/SLSResponse.h" #include "common/ErrorUtil.h" #include "common/StringTools.h" diff --git a/core/flusher/sls/SLSResponse.h b/core/plugin/flusher/sls/SLSResponse.h similarity index 100% rename from core/flusher/sls/SLSResponse.h rename to core/plugin/flusher/sls/SLSResponse.h diff --git a/core/flusher/sls/SendResult.cpp b/core/plugin/flusher/sls/SendResult.cpp similarity index 97% rename from core/flusher/sls/SendResult.cpp rename to core/plugin/flusher/sls/SendResult.cpp index dc07f98d5e..955bff6a52 100644 --- a/core/flusher/sls/SendResult.cpp +++ b/core/plugin/flusher/sls/SendResult.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/sls/SendResult.h" +#include "plugin/flusher/sls/SendResult.h" #include "sdk/Common.h" diff --git a/core/flusher/sls/SendResult.h b/core/plugin/flusher/sls/SendResult.h similarity index 100% rename from core/flusher/sls/SendResult.h rename to core/plugin/flusher/sls/SendResult.h diff --git a/core/input/InputContainerStdio.cpp b/core/plugin/input/InputContainerStdio.cpp similarity index 97% rename from core/input/InputContainerStdio.cpp rename to core/plugin/input/InputContainerStdio.cpp index 82cb068d23..a4147c82f1 100644 --- a/core/input/InputContainerStdio.cpp +++ b/core/plugin/input/InputContainerStdio.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "input/InputContainerStdio.h" +#include "plugin/input/InputContainerStdio.h" #include "app_config/AppConfig.h" #include "common/FileSystemUtil.h" @@ -21,11 +21,11 @@ #include "file_server/FileServer.h" #include "monitor/MetricConstants.h" #include "pipeline/Pipeline.h" -#include "plugin/PluginRegistry.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/inner/ProcessorParseContainerLogNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorTagNative.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorTagNative.h" using namespace std; @@ -69,7 +69,7 @@ bool InputContainerStdio::Init(const Json::Value& config, Json::Value& optionalG if (!mContainerDiscovery.Init(config, *mContext, sName)) { return false; } - mContainerDiscovery.GenerateContainerMetaFetchingGoPipeline(optionalGoPipeline); + mContainerDiscovery.GenerateContainerMetaFetchingGoPipeline(optionalGoPipeline, nullptr, 
mContext->GetPipeline().GenNextPluginMeta(false)); if (!mFileReader.Init(config, *mContext, sName)) { return false; diff --git a/core/input/InputContainerStdio.h b/core/plugin/input/InputContainerStdio.h similarity index 95% rename from core/input/InputContainerStdio.h rename to core/plugin/input/InputContainerStdio.h index 87b6d8316e..0ee1ea11fd 100644 --- a/core/input/InputContainerStdio.h +++ b/core/plugin/input/InputContainerStdio.h @@ -22,8 +22,8 @@ #include "file_server/FileDiscoveryOptions.h" #include "file_server/MultilineOptions.h" #include "monitor/PluginMetricManager.h" -#include "plugin/interface/Input.h" -#include "reader/FileReaderOptions.h" +#include "pipeline/plugin/interface/Input.h" +#include "file_server/reader/FileReaderOptions.h" namespace logtail { diff --git a/core/input/InputEBPFFileSecurity.cpp b/core/plugin/input/InputEBPFFileSecurity.cpp similarity index 93% rename from core/input/InputEBPFFileSecurity.cpp rename to core/plugin/input/InputEBPFFileSecurity.cpp index cf6c813ec0..ed44311a4f 100644 --- a/core/input/InputEBPFFileSecurity.cpp +++ b/core/plugin/input/InputEBPFFileSecurity.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "input/InputEBPFFileSecurity.h" +#include "plugin/input/InputEBPFFileSecurity.h" // #include "ebpf/security/SecurityServer.h" #include "ebpf/include/export.h" @@ -32,7 +32,7 @@ bool InputEBPFFileSecurity::Init(const Json::Value& config, Json::Value& optiona LOG_WARNING(sLogger, ("pipeline already loaded", "FILE_SECURITY")("prev pipeline", prev_pipeline_name)("curr pipeline", pipeline_name)); return false; } - return mSecurityOptions.Init(ebpf::SecurityFilterType::FILE, config, mContext, sName); + return mSecurityOptions.Init(ebpf::SecurityProbeType::FILE, config, mContext, sName); } bool InputEBPFFileSecurity::Start() { diff --git a/core/input/InputEBPFFileSecurity.h b/core/plugin/input/InputEBPFFileSecurity.h similarity index 96% rename from core/input/InputEBPFFileSecurity.h rename to core/plugin/input/InputEBPFFileSecurity.h index 32957a0fc7..75a0cd9f1f 100644 --- a/core/input/InputEBPFFileSecurity.h +++ b/core/plugin/input/InputEBPFFileSecurity.h @@ -19,7 +19,7 @@ #include #include "ebpf/config.h" -#include "plugin/interface/Input.h" +#include "pipeline/plugin/interface/Input.h" #include "ebpf/eBPFServer.h" namespace logtail { diff --git a/core/input/InputEBPFNetworkObserver.cpp b/core/plugin/input/InputEBPFNetworkObserver.cpp similarity index 97% rename from core/input/InputEBPFNetworkObserver.cpp rename to core/plugin/input/InputEBPFNetworkObserver.cpp index ae653ef967..a72b43a2ca 100644 --- a/core/input/InputEBPFNetworkObserver.cpp +++ b/core/plugin/input/InputEBPFNetworkObserver.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "input/InputEBPFNetworkObserver.h" +#include "plugin/input/InputEBPFNetworkObserver.h" #include "ebpf/include/export.h" #include "ebpf/eBPFServer.h" diff --git a/core/input/InputEBPFNetworkObserver.h b/core/plugin/input/InputEBPFNetworkObserver.h similarity index 96% rename from core/input/InputEBPFNetworkObserver.h rename to core/plugin/input/InputEBPFNetworkObserver.h index dd6eff1be1..10ebedbeea 100644 --- a/core/input/InputEBPFNetworkObserver.h +++ b/core/plugin/input/InputEBPFNetworkObserver.h @@ -19,7 +19,7 @@ #include #include "ebpf/config.h" -#include "plugin/interface/Input.h" +#include "pipeline/plugin/interface/Input.h" #include "ebpf/include/export.h" namespace logtail { diff --git a/core/input/InputEBPFNetworkSecurity.cpp b/core/plugin/input/InputEBPFNetworkSecurity.cpp similarity index 93% rename from core/input/InputEBPFNetworkSecurity.cpp rename to core/plugin/input/InputEBPFNetworkSecurity.cpp index b6d8038425..d9ff9ec188 100644 --- a/core/input/InputEBPFNetworkSecurity.cpp +++ b/core/plugin/input/InputEBPFNetworkSecurity.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "input/InputEBPFNetworkSecurity.h" +#include "plugin/input/InputEBPFNetworkSecurity.h" #include "ebpf/include/export.h" #include "ebpf/eBPFServer.h" @@ -35,7 +35,7 @@ bool InputEBPFNetworkSecurity::Init(const Json::Value& config, Json::Value& opti return false; } - return mSecurityOptions.Init(ebpf::SecurityFilterType::NETWORK, config, mContext, sName); + return mSecurityOptions.Init(ebpf::SecurityProbeType::NETWORK, config, mContext, sName); } bool InputEBPFNetworkSecurity::Start() { diff --git a/core/input/InputEBPFNetworkSecurity.h b/core/plugin/input/InputEBPFNetworkSecurity.h similarity index 96% rename from core/input/InputEBPFNetworkSecurity.h rename to core/plugin/input/InputEBPFNetworkSecurity.h index e57fbab5e7..125fcbff33 100644 --- a/core/input/InputEBPFNetworkSecurity.h +++ b/core/plugin/input/InputEBPFNetworkSecurity.h @@ -19,7 +19,7 @@ #include #include "ebpf/config.h" -#include "plugin/interface/Input.h" +#include "pipeline/plugin/interface/Input.h" namespace logtail { diff --git a/core/input/InputEBPFProcessSecurity.cpp b/core/plugin/input/InputEBPFProcessSecurity.cpp similarity index 93% rename from core/input/InputEBPFProcessSecurity.cpp rename to core/plugin/input/InputEBPFProcessSecurity.cpp index bc684f046f..04b9696929 100644 --- a/core/input/InputEBPFProcessSecurity.cpp +++ b/core/plugin/input/InputEBPFProcessSecurity.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "input/InputEBPFProcessSecurity.h" +#include "plugin/input/InputEBPFProcessSecurity.h" #include "ebpf/include/export.h" #include "ebpf/eBPFServer.h" @@ -30,7 +30,7 @@ bool InputEBPFProcessSecurity::Init(const Json::Value& config, Json::Value& opti LOG_WARNING(sLogger, ("pipeline already loaded", "PROCESS_SECURITY")("prev pipeline", prev_pipeline_name)("curr pipeline", pipeline_name)); return false; } - return mSecurityOptions.Init(ebpf::SecurityFilterType::PROCESS, config, mContext, sName); + return mSecurityOptions.Init(ebpf::SecurityProbeType::PROCESS, config, mContext, sName); } bool InputEBPFProcessSecurity::Start() { diff --git a/core/input/InputEBPFProcessSecurity.h b/core/plugin/input/InputEBPFProcessSecurity.h similarity index 96% rename from core/input/InputEBPFProcessSecurity.h rename to core/plugin/input/InputEBPFProcessSecurity.h index 054dd6c059..c89210c986 100644 --- a/core/input/InputEBPFProcessSecurity.h +++ b/core/plugin/input/InputEBPFProcessSecurity.h @@ -19,7 +19,7 @@ #include #include "ebpf/config.h" -#include "plugin/interface/Input.h" +#include "pipeline/plugin/interface/Input.h" namespace logtail { diff --git a/core/input/InputFeedbackInterfaceRegistry.cpp b/core/plugin/input/InputFeedbackInterfaceRegistry.cpp similarity index 86% rename from core/input/InputFeedbackInterfaceRegistry.cpp rename to core/plugin/input/InputFeedbackInterfaceRegistry.cpp index bf4cc63eba..d4f2fb7826 100644 --- a/core/input/InputFeedbackInterfaceRegistry.cpp +++ b/core/plugin/input/InputFeedbackInterfaceRegistry.cpp @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "input/InputFeedbackInterfaceRegistry.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" -#include "event/BlockEventManager.h" -#include "input/InputContainerStdio.h" -#include "input/InputFile.h" +#include "file_server/event/BlockEventManager.h" +#include "plugin/input/InputContainerStdio.h" +#include "plugin/input/InputFile.h" using namespace std; diff --git a/core/input/InputFeedbackInterfaceRegistry.h b/core/plugin/input/InputFeedbackInterfaceRegistry.h similarity index 100% rename from core/input/InputFeedbackInterfaceRegistry.h rename to core/plugin/input/InputFeedbackInterfaceRegistry.h diff --git a/core/input/InputFile.cpp b/core/plugin/input/InputFile.cpp similarity index 97% rename from core/input/InputFile.cpp rename to core/plugin/input/InputFile.cpp index 34e8264e88..d21c1ce483 100644 --- a/core/input/InputFile.cpp +++ b/core/plugin/input/InputFile.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "input/InputFile.h" +#include "plugin/input/InputFile.h" #include @@ -21,15 +21,15 @@ #include "common/JsonUtil.h" #include "common/LogtailCommonFlags.h" #include "common/ParamExtractor.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "file_server/FileServer.h" #include "monitor/MetricConstants.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineManager.h" -#include "plugin/PluginRegistry.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" -#include "processor/inner/ProcessorTagNative.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "plugin/processor/inner/ProcessorTagNative.h" using namespace std; @@ -85,7 +85,8 @@ bool InputFile::Init(const Json::Value& config, Json::Value& optionalGoPipeline) } mFileDiscovery.SetEnableContainerDiscoveryFlag(true); mFileDiscovery.SetDeduceAndSetContainerBaseDirFunc(DeduceAndSetContainerBaseDir); - mContainerDiscovery.GenerateContainerMetaFetchingGoPipeline(optionalGoPipeline, &mFileDiscovery); + mContainerDiscovery.GenerateContainerMetaFetchingGoPipeline( + optionalGoPipeline, &mFileDiscovery, mContext->GetPipeline().GenNextPluginMeta(false)); } if (!mFileReader.Init(config, *mContext, sName)) { diff --git a/core/input/InputFile.h b/core/plugin/input/InputFile.h similarity index 95% rename from core/input/InputFile.h rename to core/plugin/input/InputFile.h index 3da1ac9f4f..5410776de2 100644 --- a/core/input/InputFile.h +++ b/core/plugin/input/InputFile.h @@ -22,8 +22,8 @@ #include "file_server/FileDiscoveryOptions.h" #include "file_server/MultilineOptions.h" #include "monitor/PluginMetricManager.h" -#include "plugin/interface/Input.h" -#include "reader/FileReaderOptions.h" +#include "pipeline/plugin/interface/Input.h" +#include 
"file_server/reader/FileReaderOptions.h" namespace logtail { diff --git a/core/input/InputObserverNetwork.cpp b/core/plugin/input/InputObserverNetwork.cpp similarity index 96% rename from core/input/InputObserverNetwork.cpp rename to core/plugin/input/InputObserverNetwork.cpp index 0eb51c1f00..ab22c12848 100644 --- a/core/input/InputObserverNetwork.cpp +++ b/core/plugin/input/InputObserverNetwork.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "input/InputObserverNetwork.h" +#include "plugin/input/InputObserverNetwork.h" #include "observer/network/NetworkConfig.h" diff --git a/core/input/InputObserverNetwork.h b/core/plugin/input/InputObserverNetwork.h similarity index 95% rename from core/input/InputObserverNetwork.h rename to core/plugin/input/InputObserverNetwork.h index 05f39d7847..dbf7300bff 100644 --- a/core/input/InputObserverNetwork.h +++ b/core/plugin/input/InputObserverNetwork.h @@ -18,7 +18,7 @@ #include -#include "plugin/interface/Input.h" +#include "pipeline/plugin/interface/Input.h" namespace logtail { diff --git a/core/input/InputPrometheus.cpp b/core/plugin/input/InputPrometheus.cpp similarity index 92% rename from core/input/InputPrometheus.cpp rename to core/plugin/input/InputPrometheus.cpp index d60feb312f..5257ccc2a3 100644 --- a/core/input/InputPrometheus.cpp +++ b/core/plugin/input/InputPrometheus.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "input/InputPrometheus.h" +#include "plugin/input/InputPrometheus.h" #include #include @@ -26,9 +26,9 @@ #include "logger/Logger.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/inner/ProcessorPromParseMetricNative.h" -#include "processor/inner/ProcessorPromRelabelMetricNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" +#include "plugin/processor/inner/ProcessorPromRelabelMetricNative.h" #include "prometheus/Constants.h" #include "prometheus/PrometheusInputRunner.h" #include "prometheus/schedulers/TargetSubscriberScheduler.h" @@ -65,6 +65,7 @@ bool InputPrometheus::Init(const Json::Value& config, Json::Value&) { /// @brief register scrape job by PrometheusInputRunner bool InputPrometheus::Start() { LOG_INFO(sLogger, ("input config start", mJobName)); + PrometheusInputRunner::GetInstance()->Init(); mTargetSubscirber->mQueueKey = mContext->GetProcessQueueKey(); diff --git a/core/input/InputPrometheus.h b/core/plugin/input/InputPrometheus.h similarity index 94% rename from core/input/InputPrometheus.h rename to core/plugin/input/InputPrometheus.h index 3308ebe1e1..e0ef5a8b10 100644 --- a/core/input/InputPrometheus.h +++ b/core/plugin/input/InputPrometheus.h @@ -4,7 +4,7 @@ #include #include -#include "plugin/interface/Input.h" +#include "pipeline/plugin/interface/Input.h" #include "prometheus/schedulers/TargetSubscriberScheduler.h" namespace logtail { diff --git a/core/input/InputStaticFile.cpp b/core/plugin/input/InputStaticFile.cpp similarity index 100% rename from core/input/InputStaticFile.cpp rename to core/plugin/input/InputStaticFile.cpp diff --git a/core/input/InputStaticFile.h b/core/plugin/input/InputStaticFile.h similarity index 100% rename from core/input/InputStaticFile.h rename to core/plugin/input/InputStaticFile.h diff --git a/core/input/input.cmake 
b/core/plugin/input/input.cmake similarity index 72% rename from core/input/input.cmake rename to core/plugin/input/input.cmake index fb02f053cc..eb213b46b5 100644 --- a/core/input/input.cmake +++ b/core/plugin/input/input.cmake @@ -17,22 +17,22 @@ include_directories(input) # Add source files -file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/input/*.c ${CMAKE_SOURCE_DIR}/input/*.cc ${CMAKE_SOURCE_DIR}/input/*.cpp ${CMAKE_SOURCE_DIR}/input/*.h) +file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/plugin/input/*.c ${CMAKE_SOURCE_DIR}/plugin/input/*.cc ${CMAKE_SOURCE_DIR}/plugin/input/*.cpp ${CMAKE_SOURCE_DIR}/plugin/input/*.h) list(APPEND THIS_SOURCE_FILES_LIST ${THIS_SOURCE_FILES}) if(MSVC) # remove observer related files in input - list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/input/InputObserverNetwork.cpp ${CMAKE_SOURCE_DIR}/input/InputObserverNetwork.h) + list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.h) if (ENABLE_ENTERPRISE) - list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/input/InputStream.cpp ${CMAKE_SOURCE_DIR}/input/InputStream.h) + list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.h) endif () elseif(UNIX) if (NOT LINUX) # remove observer related files in input - list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/input/InputObserverNetwork.cpp ${CMAKE_SOURCE_DIR}/input/InputObserverNetwork.h) + list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.h) # remove inputStream in input if (ENABLE_ENTERPRISE) - list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/input/InputStream.cpp ${CMAKE_SOURCE_DIR}/input/InputStream.h) + list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.cpp 
${CMAKE_SOURCE_DIR}/plugin/input/InputStream.h) endif () endif() endif() diff --git a/core/input/links.cmake b/core/plugin/input/links.cmake similarity index 100% rename from core/input/links.cmake rename to core/plugin/input/links.cmake diff --git a/core/processor/CommonParserOptions.cpp b/core/plugin/processor/CommonParserOptions.cpp similarity index 92% rename from core/processor/CommonParserOptions.cpp rename to core/plugin/processor/CommonParserOptions.cpp index 615b431d2f..dd1a75308e 100644 --- a/core/processor/CommonParserOptions.cpp +++ b/core/plugin/processor/CommonParserOptions.cpp @@ -14,11 +14,11 @@ * limitations under the License. */ -#include "processor/CommonParserOptions.h" +#include "plugin/processor/CommonParserOptions.h" #include "common/Constants.h" #include "common/ParamExtractor.h" -#include "processor/inner/ProcessorParseContainerLogNative.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" using namespace std; @@ -26,7 +26,7 @@ namespace logtail { const string CommonParserOptions::legacyUnmatchedRawLogKey = "__raw_log__"; -bool CommonParserOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginName) { +bool CommonParserOptions::Init(const Json::Value& config, const PipelineContext& ctx, const string& pluginType) { string errorMsg; // KeepingSourceWhenParseFail @@ -35,7 +35,7 @@ bool CommonParserOptions::Init(const Json::Value& config, const PipelineContext& ctx.GetAlarm(), errorMsg, mKeepingSourceWhenParseFail, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -48,7 +48,7 @@ bool CommonParserOptions::Init(const Json::Value& config, const PipelineContext& ctx.GetAlarm(), errorMsg, mKeepingSourceWhenParseSucceed, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -60,7 +60,7 @@ bool CommonParserOptions::Init(const Json::Value& config, const PipelineContext& PARAM_WARNING_IGNORE(ctx.GetLogger(), 
ctx.GetAlarm(), errorMsg, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), @@ -77,7 +77,7 @@ bool CommonParserOptions::Init(const Json::Value& config, const PipelineContext& ctx.GetAlarm(), errorMsg, mCopingRawLog, - pluginName, + pluginType, ctx.GetConfigName(), ctx.GetProjectName(), ctx.GetLogstoreName(), diff --git a/core/processor/CommonParserOptions.h b/core/plugin/processor/CommonParserOptions.h similarity index 97% rename from core/processor/CommonParserOptions.h rename to core/plugin/processor/CommonParserOptions.h index aa9605832c..b76345b37a 100644 --- a/core/processor/CommonParserOptions.h +++ b/core/plugin/processor/CommonParserOptions.h @@ -33,7 +33,7 @@ struct CommonParserOptions { // for backward compatability only, should not be explicitly used! bool mCopingRawLog = false; - bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginName); + bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginType); bool ShouldAddSourceContent(bool parseSuccess); bool ShouldAddLegacyUnmatchedRawLog(bool parseSuccess); bool ShouldEraseEvent(bool parseSuccess, const LogEvent& sourceEvent); diff --git a/core/processor/DynamicCProcessorProxy.cpp b/core/plugin/processor/DynamicCProcessorProxy.cpp similarity index 96% rename from core/processor/DynamicCProcessorProxy.cpp rename to core/plugin/processor/DynamicCProcessorProxy.cpp index e37e9224b9..8824aece8d 100644 --- a/core/processor/DynamicCProcessorProxy.cpp +++ b/core/plugin/processor/DynamicCProcessorProxy.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/DynamicCProcessorProxy.h" +#include "plugin/processor/DynamicCProcessorProxy.h" namespace logtail { diff --git a/core/processor/DynamicCProcessorProxy.h b/core/plugin/processor/DynamicCProcessorProxy.h similarity index 92% rename from core/processor/DynamicCProcessorProxy.h rename to core/plugin/processor/DynamicCProcessorProxy.h index 887034f30c..1508f0a003 100644 --- a/core/processor/DynamicCProcessorProxy.h +++ b/core/plugin/processor/DynamicCProcessorProxy.h @@ -16,8 +16,8 @@ #pragma once -#include "plugin/creator/CProcessor.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/creator/CProcessor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/processor/ProcessorDesensitizeNative.cpp b/core/plugin/processor/ProcessorDesensitizeNative.cpp similarity index 98% rename from core/processor/ProcessorDesensitizeNative.cpp rename to core/plugin/processor/ProcessorDesensitizeNative.cpp index bbe792eba4..172bb2d025 100644 --- a/core/processor/ProcessorDesensitizeNative.cpp +++ b/core/plugin/processor/ProcessorDesensitizeNative.cpp @@ -13,13 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "processor/ProcessorDesensitizeNative.h" +#include "plugin/processor/ProcessorDesensitizeNative.h" #include "common/Constants.h" #include "common/ParamExtractor.h" #include "models/LogEvent.h" #include "monitor/MetricConstants.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" #include "sdk/Common.h" namespace logtail { diff --git a/core/processor/ProcessorDesensitizeNative.h b/core/plugin/processor/ProcessorDesensitizeNative.h similarity index 97% rename from core/processor/ProcessorDesensitizeNative.h rename to core/plugin/processor/ProcessorDesensitizeNative.h index 6a51c034e3..2352b40d5a 100644 --- a/core/processor/ProcessorDesensitizeNative.h +++ b/core/plugin/processor/ProcessorDesensitizeNative.h @@ -18,7 +18,7 @@ #include -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/processor/ProcessorFilterNative.cpp b/core/plugin/processor/ProcessorFilterNative.cpp similarity index 99% rename from core/processor/ProcessorFilterNative.cpp rename to core/plugin/processor/ProcessorFilterNative.cpp index 04aac8e0a9..a1929e78e3 100644 --- a/core/processor/ProcessorFilterNative.cpp +++ b/core/plugin/processor/ProcessorFilterNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/ProcessorFilterNative.h" +#include "plugin/processor/ProcessorFilterNative.h" #include diff --git a/core/processor/ProcessorFilterNative.h b/core/plugin/processor/ProcessorFilterNative.h similarity index 98% rename from core/processor/ProcessorFilterNative.h rename to core/plugin/processor/ProcessorFilterNative.h index 447445ab40..c5969d427b 100644 --- a/core/processor/ProcessorFilterNative.h +++ b/core/plugin/processor/ProcessorFilterNative.h @@ -18,7 +18,7 @@ #include "app_config/AppConfig.h" #include "models/LogEvent.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" #include diff --git a/core/processor/ProcessorParseApsaraNative.cpp b/core/plugin/processor/ProcessorParseApsaraNative.cpp similarity index 99% rename from core/processor/ProcessorParseApsaraNative.cpp rename to core/plugin/processor/ProcessorParseApsaraNative.cpp index a911958714..402d698704 100644 --- a/core/processor/ProcessorParseApsaraNative.cpp +++ b/core/plugin/processor/ProcessorParseApsaraNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/ProcessorParseApsaraNative.h" +#include "plugin/processor/ProcessorParseApsaraNative.h" #include "app_config/AppConfig.h" #include "common/LogtailCommonFlags.h" @@ -22,7 +22,7 @@ #include "common/TimeUtil.h" #include "models/LogEvent.h" #include "monitor/MetricConstants.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { diff --git a/core/processor/ProcessorParseApsaraNative.h b/core/plugin/processor/ProcessorParseApsaraNative.h similarity index 95% rename from core/processor/ProcessorParseApsaraNative.h rename to core/plugin/processor/ProcessorParseApsaraNative.h index aade2ac02a..b081df6b78 100644 --- a/core/processor/ProcessorParseApsaraNative.h +++ b/core/plugin/processor/ProcessorParseApsaraNative.h @@ -18,8 +18,8 @@ #include "common/TimeUtil.h" #include "models/LogEvent.h" -#include "plugin/interface/Processor.h" -#include "processor/CommonParserOptions.h" +#include "pipeline/plugin/interface/Processor.h" +#include "plugin/processor/CommonParserOptions.h" namespace logtail { diff --git a/core/processor/ProcessorParseDelimiterNative.cpp b/core/plugin/processor/ProcessorParseDelimiterNative.cpp similarity index 99% rename from core/processor/ProcessorParseDelimiterNative.cpp rename to core/plugin/processor/ProcessorParseDelimiterNative.cpp index 5c9ec84464..2e73690fa8 100644 --- a/core/processor/ProcessorParseDelimiterNative.cpp +++ b/core/plugin/processor/ProcessorParseDelimiterNative.cpp @@ -14,12 +14,12 @@ * limitations under the License. 
*/ -#include "processor/ProcessorParseDelimiterNative.h" +#include "plugin/processor/ProcessorParseDelimiterNative.h" #include "common/ParamExtractor.h" #include "models/LogEvent.h" #include "monitor/MetricConstants.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { diff --git a/core/processor/ProcessorParseDelimiterNative.h b/core/plugin/processor/ProcessorParseDelimiterNative.h similarity index 97% rename from core/processor/ProcessorParseDelimiterNative.h rename to core/plugin/processor/ProcessorParseDelimiterNative.h index b91b02a03f..6815ef7561 100644 --- a/core/processor/ProcessorParseDelimiterNative.h +++ b/core/plugin/processor/ProcessorParseDelimiterNative.h @@ -20,8 +20,8 @@ #include "models/LogEvent.h" #include "parser/DelimiterModeFsmParser.h" -#include "plugin/interface/Processor.h" -#include "processor/CommonParserOptions.h" +#include "pipeline/plugin/interface/Processor.h" +#include "plugin/processor/CommonParserOptions.h" namespace logtail { diff --git a/core/processor/ProcessorParseJsonNative.cpp b/core/plugin/processor/ProcessorParseJsonNative.cpp similarity index 98% rename from core/processor/ProcessorParseJsonNative.cpp rename to core/plugin/processor/ProcessorParseJsonNative.cpp index 8411715033..ec83a88696 100644 --- a/core/processor/ProcessorParseJsonNative.cpp +++ b/core/plugin/processor/ProcessorParseJsonNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/ProcessorParseJsonNative.h" +#include "plugin/processor/ProcessorParseJsonNative.h" #include #include @@ -22,7 +22,7 @@ #include "common/ParamExtractor.h" #include "models/LogEvent.h" #include "monitor/MetricConstants.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { diff --git a/core/processor/ProcessorParseJsonNative.h b/core/plugin/processor/ProcessorParseJsonNative.h similarity index 95% rename from core/processor/ProcessorParseJsonNative.h rename to core/plugin/processor/ProcessorParseJsonNative.h index 5f51c879c2..b071f2775a 100644 --- a/core/processor/ProcessorParseJsonNative.h +++ b/core/plugin/processor/ProcessorParseJsonNative.h @@ -18,8 +18,8 @@ #include #include "models/LogEvent.h" -#include "plugin/interface/Processor.h" -#include "processor/CommonParserOptions.h" +#include "pipeline/plugin/interface/Processor.h" +#include "plugin/processor/CommonParserOptions.h" namespace logtail { diff --git a/core/processor/ProcessorParseRegexNative.cpp b/core/plugin/processor/ProcessorParseRegexNative.cpp similarity index 99% rename from core/processor/ProcessorParseRegexNative.cpp rename to core/plugin/processor/ProcessorParseRegexNative.cpp index ae3b892f73..e3c5790d99 100644 --- a/core/processor/ProcessorParseRegexNative.cpp +++ b/core/plugin/processor/ProcessorParseRegexNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/ProcessorParseRegexNative.h" +#include "plugin/processor/ProcessorParseRegexNative.h" #include "app_config/AppConfig.h" #include "common/ParamExtractor.h" diff --git a/core/processor/ProcessorParseRegexNative.h b/core/plugin/processor/ProcessorParseRegexNative.h similarity index 95% rename from core/processor/ProcessorParseRegexNative.h rename to core/plugin/processor/ProcessorParseRegexNative.h index 3b2a54926c..fa50d27820 100644 --- a/core/processor/ProcessorParseRegexNative.h +++ b/core/plugin/processor/ProcessorParseRegexNative.h @@ -21,8 +21,8 @@ #include #include "models/LogEvent.h" -#include "plugin/interface/Processor.h" -#include "processor/CommonParserOptions.h" +#include "pipeline/plugin/interface/Processor.h" +#include "plugin/processor/CommonParserOptions.h" namespace logtail { diff --git a/core/processor/ProcessorParseTimestampNative.cpp b/core/plugin/processor/ProcessorParseTimestampNative.cpp similarity index 98% rename from core/processor/ProcessorParseTimestampNative.cpp rename to core/plugin/processor/ProcessorParseTimestampNative.cpp index d8ca62ec5c..b37343f1db 100644 --- a/core/processor/ProcessorParseTimestampNative.cpp +++ b/core/plugin/processor/ProcessorParseTimestampNative.cpp @@ -14,13 +14,13 @@ * limitations under the License. 
*/ -#include "processor/ProcessorParseTimestampNative.h" +#include "plugin/processor/ProcessorParseTimestampNative.h" #include "app_config/AppConfig.h" #include "common/LogtailCommonFlags.h" #include "common/ParamExtractor.h" #include "monitor/MetricConstants.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { diff --git a/core/processor/ProcessorParseTimestampNative.h b/core/plugin/processor/ProcessorParseTimestampNative.h similarity index 97% rename from core/processor/ProcessorParseTimestampNative.h rename to core/plugin/processor/ProcessorParseTimestampNative.h index 0596951d60..c66c3b5118 100644 --- a/core/processor/ProcessorParseTimestampNative.h +++ b/core/plugin/processor/ProcessorParseTimestampNative.h @@ -17,7 +17,7 @@ #pragma once #include "common/TimeUtil.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { class ProcessorParseTimestampNative : public Processor { diff --git a/core/processor/ProcessorSPL.cpp b/core/plugin/processor/ProcessorSPL.cpp similarity index 99% rename from core/processor/ProcessorSPL.cpp rename to core/plugin/processor/ProcessorSPL.cpp index 9641cface5..d3864de236 100644 --- a/core/processor/ProcessorSPL.cpp +++ b/core/plugin/processor/ProcessorSPL.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/ProcessorSPL.h" +#include "plugin/processor/ProcessorSPL.h" #include #ifdef FMT_HEADER_ONLY diff --git a/core/processor/ProcessorSPL.h b/core/plugin/processor/ProcessorSPL.h similarity index 97% rename from core/processor/ProcessorSPL.h rename to core/plugin/processor/ProcessorSPL.h index 191f48d5f3..c03a49bfc5 100644 --- a/core/processor/ProcessorSPL.h +++ b/core/plugin/processor/ProcessorSPL.h @@ -17,7 +17,7 @@ #include #include "monitor/LogtailMetric.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace apsara::sls::spl { class SplPipeline; diff --git a/core/processor/inner/ProcessorMergeMultilineLogNative.cpp b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp similarity index 99% rename from core/processor/inner/ProcessorMergeMultilineLogNative.cpp rename to core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp index da8dfc3ce0..077e266099 100644 --- a/core/processor/inner/ProcessorMergeMultilineLogNative.cpp +++ b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" #include #include diff --git a/core/processor/inner/ProcessorMergeMultilineLogNative.h b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h similarity index 97% rename from core/processor/inner/ProcessorMergeMultilineLogNative.h rename to core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h index ab3b37e849..2f67ab233e 100644 --- a/core/processor/inner/ProcessorMergeMultilineLogNative.h +++ b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h @@ -19,7 +19,7 @@ #include #include "file_server/MultilineOptions.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/processor/inner/ProcessorParseContainerLogNative.cpp b/core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp similarity index 99% rename from core/processor/inner/ProcessorParseContainerLogNative.cpp rename to core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp index 34addbdcda..efd965791b 100644 --- a/core/processor/inner/ProcessorParseContainerLogNative.cpp +++ b/core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/inner/ProcessorParseContainerLogNative.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" #include #include @@ -27,7 +27,7 @@ #include "common/ParamExtractor.h" #include "models/LogEvent.h" #include "monitor/MetricConstants.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" namespace logtail { diff --git a/core/processor/inner/ProcessorParseContainerLogNative.h b/core/plugin/processor/inner/ProcessorParseContainerLogNative.h similarity index 98% rename from core/processor/inner/ProcessorParseContainerLogNative.h rename to core/plugin/processor/inner/ProcessorParseContainerLogNative.h index 10178de379..8fdbcfc0fa 100644 --- a/core/processor/inner/ProcessorParseContainerLogNative.h +++ b/core/plugin/processor/inner/ProcessorParseContainerLogNative.h @@ -17,7 +17,7 @@ #pragma once #include "models/LogEvent.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/processor/inner/ProcessorPromParseMetricNative.cpp b/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp similarity index 57% rename from core/processor/inner/ProcessorPromParseMetricNative.cpp rename to core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp index 1a88ab1839..894c76f700 100644 --- a/core/processor/inner/ProcessorPromParseMetricNative.cpp +++ b/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp @@ -1,7 +1,8 @@ -#include "processor/inner/ProcessorPromParseMetricNative.h" +#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" #include +#include "common/StringTools.h" #include "models/LogEvent.h" #include "models/MetricEvent.h" #include "models/PipelineEventGroup.h" @@ -26,26 +27,30 @@ void ProcessorPromParseMetricNative::Process(PipelineEventGroup& eGroup) { EventsContainer& events = eGroup.MutableEvents(); EventsContainer newEvents; + StringView 
scrapeTimestampMilliSecStr = eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC); + auto timestampMilliSec = StringTo(scrapeTimestampMilliSecStr.to_string()); + auto timestamp = timestampMilliSec / 1000; + auto nanoSec = timestampMilliSec % 1000 * 1000000; + for (auto& e : events) { - ProcessEvent(e, newEvents, eGroup); + ProcessEvent(e, newEvents, eGroup, timestamp, nanoSec); } events.swap(newEvents); + eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED, ToString(events.size())); } bool ProcessorPromParseMetricNative::IsSupportedEvent(const PipelineEventPtr& e) const { return e.Is(); } -bool ProcessorPromParseMetricNative::ProcessEvent(PipelineEventPtr& e, - EventsContainer& newEvents, - PipelineEventGroup& eGroup) { +bool ProcessorPromParseMetricNative::ProcessEvent( + PipelineEventPtr& e, EventsContainer& newEvents, PipelineEventGroup& eGroup, uint64_t timestamp, uint32_t nanoSec) { if (!IsSupportedEvent(e)) { return false; } auto& sourceEvent = e.Cast(); std::unique_ptr metricEvent = eGroup.CreateMetricEvent(); - if (mParser.ParseLine( - sourceEvent.GetContent(prometheus::PROMETHEUS).to_string(), *metricEvent, sourceEvent.GetTimestamp())) { + if (mParser.ParseLine(sourceEvent.GetContent(prometheus::PROMETHEUS), timestamp, nanoSec, *metricEvent)) { newEvents.emplace_back(std::move(metricEvent)); } return true; diff --git a/core/processor/inner/ProcessorPromParseMetricNative.h b/core/plugin/processor/inner/ProcessorPromParseMetricNative.h similarity index 87% rename from core/processor/inner/ProcessorPromParseMetricNative.h rename to core/plugin/processor/inner/ProcessorPromParseMetricNative.h index fab7944f26..da14bf4939 100644 --- a/core/processor/inner/ProcessorPromParseMetricNative.h +++ b/core/plugin/processor/inner/ProcessorPromParseMetricNative.h @@ -4,7 +4,7 @@ #include "models/PipelineEventGroup.h" #include "models/PipelineEventPtr.h" -#include "plugin/interface/Processor.h" +#include 
"pipeline/plugin/interface/Processor.h" #include "prometheus/labels/TextParser.h" namespace logtail { @@ -20,7 +20,7 @@ class ProcessorPromParseMetricNative : public Processor { bool IsSupportedEvent(const PipelineEventPtr&) const override; private: - bool ProcessEvent(PipelineEventPtr&, EventsContainer&, PipelineEventGroup&); + bool ProcessEvent(PipelineEventPtr&, EventsContainer&, PipelineEventGroup&, uint64_t timestamp, uint32_t nanoSec); TextParser mParser; #ifdef APSARA_UNIT_TEST_MAIN diff --git a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp new file mode 100644 index 0000000000..207b240be6 --- /dev/null +++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp @@ -0,0 +1,198 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "plugin/processor/inner/ProcessorPromRelabelMetricNative.h" + +#include + +#include + +#include "common/StringTools.h" +#include "models/MetricEvent.h" +#include "models/PipelineEventGroup.h" +#include "models/PipelineEventPtr.h" +#include "prometheus/Constants.h" +#include "prometheus/Utils.h" + +using namespace std; +namespace logtail { + +const string ProcessorPromRelabelMetricNative::sName = "processor_prom_relabel_metric_native"; + +// only for inner processor +bool ProcessorPromRelabelMetricNative::Init(const Json::Value& config) { + std::string errorMsg; + if (config.isMember(prometheus::METRIC_RELABEL_CONFIGS) && config[prometheus::METRIC_RELABEL_CONFIGS].isArray() + && config[prometheus::METRIC_RELABEL_CONFIGS].size() > 0) { + for (const auto& item : config[prometheus::METRIC_RELABEL_CONFIGS]) { + mRelabelConfigs.emplace_back(item); + if (!mRelabelConfigs.back().Validate()) { + errorMsg = "metric_relabel_configs is invalid"; + LOG_ERROR(sLogger, ("init prometheus processor failed", errorMsg)); + return false; + } + } + return true; + } + + + if (config.isMember(prometheus::JOB_NAME) && config[prometheus::JOB_NAME].isString()) { + mJobName = config[prometheus::JOB_NAME].asString(); + } else { + return false; + } + if (config.isMember(prometheus::SCRAPE_TIMEOUT) && config[prometheus::SCRAPE_TIMEOUT].isString()) { + string tmpScrapeTimeoutString = config[prometheus::SCRAPE_TIMEOUT].asString(); + mScrapeTimeoutSeconds = DurationToSecond(tmpScrapeTimeoutString); + } else { + mScrapeTimeoutSeconds = 10; + } + if (config.isMember(prometheus::SAMPLE_LIMIT) && config[prometheus::SAMPLE_LIMIT].isInt64()) { + mSampleLimit = config[prometheus::SAMPLE_LIMIT].asInt64(); + } else { + mSampleLimit = -1; + } + if (config.isMember(prometheus::SERIES_LIMIT) && config[prometheus::SERIES_LIMIT].isInt64()) { + mSeriesLimit = config[prometheus::SERIES_LIMIT].asInt64(); + } else { + mSeriesLimit = -1; + } + + return true; +} + +void 
ProcessorPromRelabelMetricNative::Process(PipelineEventGroup& metricGroup) { + auto instance = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE); + + EventsContainer& events = metricGroup.MutableEvents(); + + size_t wIdx = 0; + for (size_t rIdx = 0; rIdx < events.size(); ++rIdx) { + if (ProcessEvent(events[rIdx], instance)) { + if (wIdx != rIdx) { + events[wIdx] = std::move(events[rIdx]); + } + ++wIdx; + } + } + events.resize(wIdx); + + AddAutoMetrics(metricGroup); +} + +bool ProcessorPromRelabelMetricNative::IsSupportedEvent(const PipelineEventPtr& e) const { + return e.Is(); +} + +bool ProcessorPromRelabelMetricNative::ProcessEvent(PipelineEventPtr& e, StringView instance) { + if (!IsSupportedEvent(e)) { + return false; + } + auto& sourceEvent = e.Cast(); + + Labels labels; + + labels.Reset(&sourceEvent); + Labels result; + + // if keep this sourceEvent + if (prometheus::Process(labels, mRelabelConfigs, result)) { + // if k/v in labels by not result, then delete it + labels.Range([&result, &sourceEvent](const Label& label) { + if (result.Get(label.name).empty()) { + sourceEvent.DelTag(StringView(label.name)); + } + }); + + // for each k/v in result, set it to sourceEvent + result.Range([&sourceEvent](const Label& label) { sourceEvent.SetTag(label.name, label.value); }); + + // set metricEvent name + if (!result.Get(prometheus::NAME).empty()) { + sourceEvent.SetName(result.Get(prometheus::NAME)); + } + + sourceEvent.SetTag(prometheus::JOB, mJobName); + sourceEvent.SetTag(prometheus::INSTANCE, instance); + + return true; + } + return false; +} + +void ProcessorPromRelabelMetricNative::AddAutoMetrics(PipelineEventGroup& metricGroup) { + // if up is set, then add self monitor metrics + if (metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE).empty()) { + return; + } + + StringView scrapeTimestampMilliSecStr + = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC); + auto timestampMilliSec = 
StringTo(scrapeTimestampMilliSecStr.to_string()); + auto timestamp = timestampMilliSec / 1000; + auto nanoSec = timestampMilliSec % 1000 * 1000000; + + auto instance = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE); + + uint64_t samplesPostMetricRelabel = metricGroup.GetEvents().size(); + + auto scrapeDurationSeconds + = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION).to_string()); + + AddMetric(metricGroup, prometheus::SCRAPE_DURATION_SECONDS, scrapeDurationSeconds, timestamp, nanoSec, instance); + + auto scrapeResponseSize + = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE).to_string()); + AddMetric(metricGroup, prometheus::SCRAPE_RESPONSE_SIZE_BYTES, scrapeResponseSize, timestamp, nanoSec, instance); + + if (mSampleLimit > 0) { + AddMetric(metricGroup, prometheus::SCRAPE_SAMPLES_LIMIT, mSampleLimit, timestamp, nanoSec, instance); + } + + AddMetric(metricGroup, + prometheus::SCRAPE_SAMPLES_POST_METRIC_RELABELING, + samplesPostMetricRelabel, + timestamp, + nanoSec, + instance); + + auto samplesScraped + = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED).to_string()); + + AddMetric(metricGroup, prometheus::SCRAPE_SAMPLES_SCRAPED, samplesScraped, timestamp, nanoSec, instance); + + AddMetric(metricGroup, prometheus::SCRAPE_TIMEOUT_SECONDS, mScrapeTimeoutSeconds, timestamp, nanoSec, instance); + + // up metric must be the last one + bool upState = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE).to_string()); + + AddMetric(metricGroup, prometheus::UP, 1.0 * upState, timestamp, nanoSec, instance); +} + +void ProcessorPromRelabelMetricNative::AddMetric(PipelineEventGroup& metricGroup, + const string& name, + double value, + time_t timestamp, + uint32_t nanoSec, + StringView instance) { + auto* metricEvent = metricGroup.AddMetricEvent(); + metricEvent->SetName(name); + metricEvent->SetValue(value); + 
metricEvent->SetTimestamp(timestamp, nanoSec); + metricEvent->SetTag(prometheus::JOB, mJobName); + metricEvent->SetTag(prometheus::INSTANCE, instance); +} + +} // namespace logtail diff --git a/core/processor/inner/ProcessorPromRelabelMetricNative.h b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h similarity index 71% rename from core/processor/inner/ProcessorPromRelabelMetricNative.h rename to core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h index 926f9da52e..82aaa0ce20 100644 --- a/core/processor/inner/ProcessorPromRelabelMetricNative.h +++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h @@ -20,7 +20,7 @@ #include "models/PipelineEventGroup.h" #include "models/PipelineEventPtr.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" #include "prometheus/labels/Relabel.h" namespace logtail { @@ -36,10 +36,24 @@ class ProcessorPromRelabelMetricNative : public Processor { bool IsSupportedEvent(const PipelineEventPtr& e) const override; private: - bool ProcessEvent(PipelineEventPtr& e); + bool ProcessEvent(PipelineEventPtr& e, StringView instance); + + void AddAutoMetrics(PipelineEventGroup& metricGroup); + void AddMetric(PipelineEventGroup& metricGroup, + const std::string& name, + double value, + time_t timestamp, + uint32_t nanoSec, + StringView instance); std::vector mRelabelConfigs; + // from config + std::string mJobName; + int64_t mScrapeTimeoutSeconds; + int64_t mSampleLimit; + int64_t mSeriesLimit; + #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorPromRelabelMetricNativeUnittest; friend class InputPrometheusUnittest; diff --git a/core/processor/inner/ProcessorSplitLogStringNative.cpp b/core/plugin/processor/inner/ProcessorSplitLogStringNative.cpp similarity index 99% rename from core/processor/inner/ProcessorSplitLogStringNative.cpp rename to core/plugin/processor/inner/ProcessorSplitLogStringNative.cpp index 8df6af83f9..35716caf59 100644 --- 
a/core/processor/inner/ProcessorSplitLogStringNative.cpp +++ b/core/plugin/processor/inner/ProcessorSplitLogStringNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" #include "common/ParamExtractor.h" #include "models/LogEvent.h" diff --git a/core/processor/inner/ProcessorSplitLogStringNative.h b/core/plugin/processor/inner/ProcessorSplitLogStringNative.h similarity index 96% rename from core/processor/inner/ProcessorSplitLogStringNative.h rename to core/plugin/processor/inner/ProcessorSplitLogStringNative.h index a1a394e9c0..7b0febbd21 100644 --- a/core/processor/inner/ProcessorSplitLogStringNative.h +++ b/core/plugin/processor/inner/ProcessorSplitLogStringNative.h @@ -20,7 +20,7 @@ #include #include "common/Constants.h" -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/processor/inner/ProcessorSplitMultilineLogStringNative.cpp b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp similarity index 99% rename from core/processor/inner/ProcessorSplitMultilineLogStringNative.cpp rename to core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp index d863844a65..840b6479dd 100644 --- a/core/processor/inner/ProcessorSplitMultilineLogStringNative.cpp +++ b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" #include #include @@ -25,7 +25,7 @@ #include "logger/Logger.h" #include "models/LogEvent.h" #include "monitor/MetricConstants.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { diff --git a/core/processor/inner/ProcessorSplitMultilineLogStringNative.h b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h similarity index 96% rename from core/processor/inner/ProcessorSplitMultilineLogStringNative.h rename to core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h index 603d911355..6848df98df 100644 --- a/core/processor/inner/ProcessorSplitMultilineLogStringNative.h +++ b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h @@ -21,8 +21,8 @@ #include "common/Constants.h" #include "file_server/MultilineOptions.h" -#include "plugin/interface/Processor.h" -#include "processor/CommonParserOptions.h" +#include "pipeline/plugin/interface/Processor.h" +#include "plugin/processor/CommonParserOptions.h" namespace logtail { diff --git a/core/processor/inner/ProcessorTagNative.cpp b/core/plugin/processor/inner/ProcessorTagNative.cpp similarity index 96% rename from core/processor/inner/ProcessorTagNative.cpp rename to core/plugin/processor/inner/ProcessorTagNative.cpp index 687617006f..e9f9926339 100644 --- a/core/processor/inner/ProcessorTagNative.cpp +++ b/core/plugin/processor/inner/ProcessorTagNative.cpp @@ -14,14 +14,14 @@ * limitations under the License. 
*/ -#include "processor/inner/ProcessorTagNative.h" +#include "plugin/processor/inner/ProcessorTagNative.h" #include #include "app_config/AppConfig.h" #include "application/Application.h" #include "common/Flags.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "pipeline/Pipeline.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" diff --git a/core/processor/inner/ProcessorTagNative.h b/core/plugin/processor/inner/ProcessorTagNative.h similarity index 95% rename from core/processor/inner/ProcessorTagNative.h rename to core/plugin/processor/inner/ProcessorTagNative.h index 46e1bad3ac..04676f07d8 100644 --- a/core/processor/inner/ProcessorTagNative.h +++ b/core/plugin/processor/inner/ProcessorTagNative.h @@ -16,7 +16,7 @@ #pragma once -#include "plugin/interface/Processor.h" +#include "pipeline/plugin/interface/Processor.h" namespace logtail { diff --git a/core/processor/links.cmake b/core/plugin/processor/links.cmake similarity index 100% rename from core/processor/links.cmake rename to core/plugin/processor/links.cmake diff --git a/core/processor/processor.cmake b/core/plugin/processor/processor.cmake similarity index 55% rename from core/processor/processor.cmake rename to core/plugin/processor/processor.cmake index 3dd62297ef..5c7e7b9ba4 100644 --- a/core/processor/processor.cmake +++ b/core/plugin/processor/processor.cmake @@ -17,16 +17,13 @@ include_directories(processor) # Add source files -file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/processor/*.c ${CMAKE_SOURCE_DIR}/processor/*.cc ${CMAKE_SOURCE_DIR}/processor/*.cpp ${CMAKE_SOURCE_DIR}/processor/*.h) -list(APPEND THIS_SOURCE_FILES_LIST ${THIS_SOURCE_FILES}) -# add processor/daemon -file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/processor/daemon/*.c ${CMAKE_SOURCE_DIR}/processor/daemon/*.cc ${CMAKE_SOURCE_DIR}/processor/daemon/*.cpp ${CMAKE_SOURCE_DIR}/processor/daemon/*.h) +file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/plugin/processor/*.c 
${CMAKE_SOURCE_DIR}/plugin/processor/*.cc ${CMAKE_SOURCE_DIR}/plugin/processor/*.cpp ${CMAKE_SOURCE_DIR}/plugin/processor/*.h) list(APPEND THIS_SOURCE_FILES_LIST ${THIS_SOURCE_FILES}) # add processor/inner -file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/processor/inner/*.c ${CMAKE_SOURCE_DIR}/processor/inner/*.cc ${CMAKE_SOURCE_DIR}/processor/inner/*.cpp ${CMAKE_SOURCE_DIR}/processor/inner/*.h) +file(GLOB THIS_SOURCE_FILES ${CMAKE_SOURCE_DIR}/plugin/processor/inner/*.c ${CMAKE_SOURCE_DIR}/plugin/processor/inner/*.cc ${CMAKE_SOURCE_DIR}/plugin/processor/inner/*.cpp ${CMAKE_SOURCE_DIR}/plugin/processor/inner/*.h) list(APPEND THIS_SOURCE_FILES_LIST ${THIS_SOURCE_FILES}) # Set source files to parent -list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/processor/ProcessorSPL.cpp ${CMAKE_SOURCE_DIR}/processor/ProcessorSPL.h) +list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/processor/ProcessorSPL.cpp ${CMAKE_SOURCE_DIR}/plugin/processor/ProcessorSPL.h) set(PLUGIN_SOURCE_FILES_CORE ${PLUGIN_SOURCE_FILES_CORE} ${THIS_SOURCE_FILES_LIST}) -set(PLUGIN_SOURCE_FILES_SPL ${PLUGIN_SOURCE_FILES_SPL} ${CMAKE_SOURCE_DIR}/processor/ProcessorSPL.cpp ${CMAKE_SOURCE_DIR}/processor/ProcessorSPL.h) +set(PLUGIN_SOURCE_FILES_SPL ${PLUGIN_SOURCE_FILES_SPL} ${CMAKE_SOURCE_DIR}/plugin/processor/ProcessorSPL.cpp ${CMAKE_SOURCE_DIR}/plugin/processor/ProcessorSPL.h) diff --git a/core/processor/inner/ProcessorPromRelabelMetricNative.cpp b/core/processor/inner/ProcessorPromRelabelMetricNative.cpp deleted file mode 100644 index f2d8550195..0000000000 --- a/core/processor/inner/ProcessorPromRelabelMetricNative.cpp +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2024 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "processor/inner/ProcessorPromRelabelMetricNative.h" - -#include - -#include - -#include "models/MetricEvent.h" -#include "models/PipelineEventGroup.h" -#include "models/PipelineEventPtr.h" -#include "prometheus/Constants.h" - -using namespace std; -namespace logtail { - -const string ProcessorPromRelabelMetricNative::sName = "processor_prom_relabel_metric_native"; - -// only for inner processor -bool ProcessorPromRelabelMetricNative::Init(const Json::Value& config) { - std::string errorMsg; - if (config.isMember(prometheus::METRIC_RELABEL_CONFIGS) && config[prometheus::METRIC_RELABEL_CONFIGS].isArray() - && config[prometheus::METRIC_RELABEL_CONFIGS].size() > 0) { - for (const auto& item : config[prometheus::METRIC_RELABEL_CONFIGS]) { - mRelabelConfigs.emplace_back(item); - if (!mRelabelConfigs.back().Validate()) { - errorMsg = "metric_relabel_configs is invalid"; - LOG_ERROR(sLogger, ("init prometheus processor failed", errorMsg)); - return false; - } - } - return true; - } - return true; -} - -void ProcessorPromRelabelMetricNative::Process(PipelineEventGroup& metricGroup) { - if (metricGroup.GetEvents().empty()) { - return; - } - - EventsContainer& events = metricGroup.MutableEvents(); - - size_t wIdx = 0; - for (size_t rIdx = 0; rIdx < events.size(); ++rIdx) { - if (ProcessEvent(events[rIdx])) { - if (wIdx != rIdx) { - events[wIdx] = std::move(events[rIdx]); - } - ++wIdx; - } - } - events.resize(wIdx); -} - -bool ProcessorPromRelabelMetricNative::IsSupportedEvent(const PipelineEventPtr& e) const { - return e.Is(); -} - -bool 
ProcessorPromRelabelMetricNative::ProcessEvent(PipelineEventPtr& e) { - if (!IsSupportedEvent(e)) { - return false; - } - auto& sourceEvent = e.Cast(); - - Labels labels; - - labels.Reset(&sourceEvent); - Labels result; - - // if keep this sourceEvent - if (prometheus::Process(labels, mRelabelConfigs, result)) { - // if k/v in labels by not result, then delete it - labels.Range([&result, &sourceEvent](const Label& label) { - if (result.Get(label.name).empty()) { - sourceEvent.DelTag(StringView(label.name)); - } - }); - - // for each k/v in result, set it to sourceEvent - result.Range([&sourceEvent](const Label& label) { sourceEvent.SetTag(label.name, label.value); }); - - // set metricEvent name - if (!result.Get(prometheus::NAME).empty()) { - sourceEvent.SetName(result.Get(prometheus::NAME)); - } - return true; - } - return false; -} - -} // namespace logtail diff --git a/core/profile_sender/ProfileSender.cpp b/core/profile_sender/ProfileSender.cpp index 9f0b372fc3..1121e91216 100644 --- a/core/profile_sender/ProfileSender.cpp +++ b/core/profile_sender/ProfileSender.cpp @@ -28,7 +28,7 @@ #include "sdk/Exception.h" #include "sls_control/SLSControl.h" // TODO: temporarily used -#include "compression/CompressorFactory.h" +#include "pipeline/compression/CompressorFactory.h" using namespace std; @@ -102,7 +102,7 @@ void ProfileSender::SetProfileProjectName(const string& region, const string& pr // logstore is given at send time // TODO: temporarily used flusher.mCompressor - = CompressorFactory::GetInstance()->Create(Json::Value(), PipelineContext(), "flusher_sls", CompressType::LZ4); + = CompressorFactory::GetInstance()->Create(Json::Value(), PipelineContext(), "flusher_sls", CompressType::ZSTD); } FlusherSLS* ProfileSender::GetFlusher(const string& region) { diff --git a/core/profile_sender/ProfileSender.h b/core/profile_sender/ProfileSender.h index 6d0e2d3814..fc91d22233 100644 --- a/core/profile_sender/ProfileSender.h +++ b/core/profile_sender/ProfileSender.h @@ 
-21,8 +21,8 @@ #include #include "common/Lock.h" -#include "log_pb/sls_logs.pb.h" -#include "flusher/sls/FlusherSLS.h" +#include "protobuf/sls/sls_logs.pb.h" +#include "plugin/flusher/sls/FlusherSLS.h" namespace logtail { diff --git a/core/prometheus/Constants.h b/core/prometheus/Constants.h index 9b445ebfba..26137ef755 100644 --- a/core/prometheus/Constants.h +++ b/core/prometheus/Constants.h @@ -59,9 +59,18 @@ const char* const SERIES_LIMIT = "series_limit"; const char* const MAX_SCRAPE_SIZE = "max_scrape_size"; const char* const METRIC_RELABEL_CONFIGS = "metric_relabel_configs"; const char* const AUTHORIZATION = "authorization"; +const char* const AUTHORIZATION_DEFAULT_TYEP = "Bearer"; const char* const A_UTHORIZATION = "Authorization"; const char* const TYPE = "type"; +const char* const CREDENTIALS = "credentials"; const char* const CREDENTIALS_FILE = "credentials_file"; +const char* const BASIC_AUTH = "basic_auth"; +const char* const USERNAME = "username"; +const char* const USERNAME_FILE = "username_file"; +const char* const PASSWORD = "password"; +const char* const PASSWORD_FILE = "password_file"; +const char* const BASIC_PREFIX = "Basic "; + // metric labels const char* const JOB = "job"; @@ -74,4 +83,15 @@ const char* const METRICS_PATH_LABEL_NAME = "__metrics_path__"; const char* const PARAM_LABEL_NAME = "__param_"; const char* const LABELS = "labels"; +// auto metrics +const char* const SCRAPE_DURATION_SECONDS = "scrape_duration_seconds"; +const char* const SCRAPE_RESPONSE_SIZE_BYTES = "scrape_response_size_bytes"; +const char* const SCRAPE_SAMPLES_LIMIT = "scrape_samples_limit"; +const char* const SCRAPE_SAMPLES_POST_METRIC_RELABELING = "scrape_samples_post_metric_relabeling"; +const char* const SCRAPE_SAMPLES_SCRAPED = "scrape_samples_scraped"; +const char* const SCRAPE_TIMEOUT_SECONDS = "scrape_timeout_seconds"; +const char* const UP = "up"; + +const char* const SCRAPE_TIMESTAMP_MILLISEC = "scrape_timestamp_millisec"; + } // namespace 
logtail::prometheus diff --git a/core/prometheus/PrometheusInputRunner.cpp b/core/prometheus/PrometheusInputRunner.cpp index 7e3c8b0502..e5fbcf715c 100644 --- a/core/prometheus/PrometheusInputRunner.cpp +++ b/core/prometheus/PrometheusInputRunner.cpp @@ -38,7 +38,6 @@ DECLARE_FLAG_STRING(_pod_name_); namespace logtail { PrometheusInputRunner::PrometheusInputRunner() : mUnRegisterMs(0) { - mIsStarted.store(false); mClient = std::make_unique(); mServiceHost = STRING_FLAG(loong_collector_operator_service); @@ -55,7 +54,7 @@ void PrometheusInputRunner::UpdateScrapeInput(std::shared_ptrmServicePort = mServicePort; targetSubscriber->mPodName = mPodName; - targetSubscriber->mUnRegisterMs = mUnRegisterMs; + targetSubscriber->mUnRegisterMs = mUnRegisterMs.load(); targetSubscriber->SetTimer(mTimer); targetSubscriber->SetFirstExecTime(std::chrono::steady_clock::now()); // 1. add subscriber to mTargetSubscriberSchedulerMap @@ -76,18 +75,22 @@ void PrometheusInputRunner::RemoveScrapeInput(const std::string& jobName) { } /// @brief targets discovery and start scrape work -void PrometheusInputRunner::Start() { - LOG_INFO(sLogger, ("PrometheusInputRunner", "Start")); - if (mIsStarted.load()) { +void PrometheusInputRunner::Init() { + std::lock_guard lock(mStartMutex); + if (mIsStarted) { return; } - mIsStarted.store(true); + LOG_INFO(sLogger, ("PrometheusInputRunner", "Start")); + mIsStarted = true; mTimer->Init(); AsynCurlRunner::GetInstance()->Init(); - mThreadRes = std::async(launch::async, [this]() { - // only register when operator exist - if (!mServiceHost.empty()) { + LOG_INFO(sLogger, ("PrometheusInputRunner", "register")); + // only register when operator exist + if (!mServiceHost.empty()) { + mIsThreadRunning.store(true); + auto res = std::async(launch::async, [this]() { + std::lock_guard lock(mRegisterMutex); int retry = 0; while (mIsThreadRunning.load()) { ++retry; @@ -109,7 +112,7 @@ void PrometheusInputRunner::Start() { } if 
(responseJson.isMember(prometheus::UNREGISTER_MS) && responseJson[prometheus::UNREGISTER_MS].isUInt64()) { - mUnRegisterMs = responseJson[prometheus::UNREGISTER_MS].asUInt64(); + mUnRegisterMs.store(responseJson[prometheus::UNREGISTER_MS].asUInt64()); } } LOG_INFO(sLogger, ("Register Success", mPodName)); @@ -117,20 +120,25 @@ void PrometheusInputRunner::Start() { } std::this_thread::sleep_for(std::chrono::seconds(1)); } - } - }); + }); + } } /// @brief stop scrape work and clear all scrape jobs void PrometheusInputRunner::Stop() { - LOG_INFO(sLogger, ("PrometheusInputRunner", "Stop")); + std::lock_guard lock(mStartMutex); + if (!mIsStarted) { + return; + } - mIsStarted.store(false); + mIsStarted = false; mIsThreadRunning.store(false); mTimer->Stop(); + LOG_INFO(sLogger, ("PrometheusInputRunner", "stop asyn curl runner")); AsynCurlRunner::GetInstance()->Stop(); + LOG_INFO(sLogger, ("PrometheusInputRunner", "cancel all target subscribers")); CancelAllTargetSubscriber(); { WriteLock lock(mSubscriberMapRWLock); @@ -139,7 +147,9 @@ void PrometheusInputRunner::Stop() { // only unregister when operator exist if (!mServiceHost.empty()) { + LOG_INFO(sLogger, ("PrometheusInputRunner", "unregister")); auto res = std::async(launch::async, [this]() { + std::lock_guard lock(mRegisterMutex); for (int retry = 0; retry < 3; ++retry) { sdk::HttpMessage httpResponse = SendRegisterMessage(prometheus::UNREGISTER_COLLECTOR_PATH); if (httpResponse.statusCode != 200) { @@ -152,6 +162,12 @@ void PrometheusInputRunner::Stop() { } }); } + LOG_INFO(sLogger, ("PrometheusInputRunner", "Stop")); +} + +bool PrometheusInputRunner::HasRegisteredPlugins() const { + ReadLock lock(mSubscriberMapRWLock); + return !mTargetSubscriberSchedulerMap.empty(); } sdk::HttpMessage PrometheusInputRunner::SendRegisterMessage(const string& url) const { @@ -159,6 +175,10 @@ sdk::HttpMessage PrometheusInputRunner::SendRegisterMessage(const string& url) c httpHeader[sdk::X_LOG_REQUEST_ID] = 
prometheus::PROMETHEUS_PREFIX + mPodName; sdk::HttpMessage httpResponse; httpResponse.header[sdk::X_LOG_REQUEST_ID] = prometheus::PROMETHEUS_PREFIX + mPodName; +#ifdef APSARA_UNIT_TEST_MAIN + httpResponse.statusCode = 200; + return httpResponse; +#endif try { mClient->Send(sdk::HTTP_GET, mServiceHost, @@ -177,10 +197,6 @@ sdk::HttpMessage PrometheusInputRunner::SendRegisterMessage(const string& url) c return httpResponse; } -bool PrometheusInputRunner::HasRegisteredPlugin() { - ReadLock lock(mSubscriberMapRWLock); - return !mTargetSubscriberSchedulerMap.empty(); -} void PrometheusInputRunner::CancelAllTargetSubscriber() { ReadLock lock(mSubscriberMapRWLock); diff --git a/core/prometheus/PrometheusInputRunner.h b/core/prometheus/PrometheusInputRunner.h index c15f547660..c83070f631 100644 --- a/core/prometheus/PrometheusInputRunner.h +++ b/core/prometheus/PrometheusInputRunner.h @@ -23,17 +23,19 @@ #include "common/Lock.h" #include "common/timer/Timer.h" #include "prometheus/schedulers/TargetSubscriberScheduler.h" +#include "runner/InputRunner.h" #include "sdk/Common.h" #include "sdk/CurlImp.h" namespace logtail { -class PrometheusInputRunner { +class PrometheusInputRunner : public InputRunner { public: PrometheusInputRunner(const PrometheusInputRunner&) = delete; PrometheusInputRunner(PrometheusInputRunner&&) = delete; PrometheusInputRunner& operator=(const PrometheusInputRunner&) = delete; PrometheusInputRunner& operator=(PrometheusInputRunner&&) = delete; + ~PrometheusInputRunner() override = default; static PrometheusInputRunner* GetInstance() { static PrometheusInputRunner sInstance; return &sInstance; @@ -44,21 +46,20 @@ class PrometheusInputRunner { void RemoveScrapeInput(const std::string& jobName); // target discover and scrape - void Start(); - void Stop(); - bool HasRegisteredPlugin(); + void Init() override; + void Stop() override; + bool HasRegisteredPlugins() const override; private: PrometheusInputRunner(); - ~PrometheusInputRunner() = default; - 
sdk::HttpMessage SendRegisterMessage(const std::string& url) const; void CancelAllTargetSubscriber(); - std::atomic mIsStarted; + bool mIsStarted = false; + std::mutex mStartMutex; - std::future mThreadRes; + std::mutex mRegisterMutex; std::atomic mIsThreadRunning = true; std::unique_ptr mClient; @@ -69,10 +70,10 @@ class PrometheusInputRunner { std::shared_ptr mTimer; - ReadWriteLock mSubscriberMapRWLock; + mutable ReadWriteLock mSubscriberMapRWLock; std::map> mTargetSubscriberSchedulerMap; - uint64_t mUnRegisterMs; + std::atomic mUnRegisterMs; #ifdef APSARA_UNIT_TEST_MAIN friend class PrometheusInputRunnerUnittest; diff --git a/core/prometheus/Utils.cpp b/core/prometheus/Utils.cpp index 4a9be100cb..d1a4d8021e 100644 --- a/core/prometheus/Utils.cpp +++ b/core/prometheus/Utils.cpp @@ -1,11 +1,9 @@ #include "prometheus/Utils.h" #include -#include -#include -#include #include "common/StringTools.h" +#include "models/StringView.h" using namespace std; @@ -42,4 +40,31 @@ uint64_t DurationToSecond(const std::string& duration) { return 60; } +bool IsValidMetric(const StringView& line) { + for (auto c : line) { + if (c == ' ' || c == '\t') { + continue; + } + if (c == '#') { + return false; + } + return true; + } + return false; +} + +void SplitStringView(const std::string& s, char delimiter, std::vector& result) { + size_t start = 0; + size_t end = 0; + + while ((end = s.find(delimiter, start)) != std::string::npos) { + result.emplace_back(s.data() + start, end - start); + start = end + 1; + } + if (start < s.size()) { + result.emplace_back(s.data() + start, s.size() - start); + } +} + + } // namespace logtail diff --git a/core/prometheus/Utils.h b/core/prometheus/Utils.h index c791583cc5..9c61e36b98 100644 --- a/core/prometheus/Utils.h +++ b/core/prometheus/Utils.h @@ -2,10 +2,18 @@ #include + +#include "models/StringView.h" + namespace logtail { std::string URLEncode(const std::string& value); std::string SecondToDuration(uint64_t duration); uint64_t 
DurationToSecond(const std::string& duration); + +bool IsValidMetric(const StringView& line); + +void SplitStringView(const std::string& s, char delimiter, std::vector& result); + } // namespace logtail diff --git a/core/prometheus/async/PromFuture.cpp b/core/prometheus/async/PromFuture.cpp index 404c9a9021..ca7339d319 100644 --- a/core/prometheus/async/PromFuture.cpp +++ b/core/prometheus/async/PromFuture.cpp @@ -4,11 +4,11 @@ namespace logtail { -void PromFuture::Process(const HttpResponse& response) { +void PromFuture::Process(const HttpResponse& response, uint64_t timestampMilliSec) { WriteLock lock(mStateRWLock); if (mState == PromFutureState::New) { for (auto& callback : mDoneCallbacks) { - callback(response); + callback(response, timestampMilliSec); } mState = PromFutureState::Done; } else { @@ -16,7 +16,7 @@ void PromFuture::Process(const HttpResponse& response) { } } -void PromFuture::AddDoneCallback(std::function&& callback) { +void PromFuture::AddDoneCallback(std::function&& callback) { mDoneCallbacks.emplace_back(std::move(callback)); } diff --git a/core/prometheus/async/PromFuture.h b/core/prometheus/async/PromFuture.h index 7cf92c8868..63de4b59d1 100644 --- a/core/prometheus/async/PromFuture.h +++ b/core/prometheus/async/PromFuture.h @@ -10,9 +10,9 @@ enum class PromFutureState { New, Processing, Done }; class PromFuture { public: // Process should support oneshot and streaming mode. 
- void Process(const HttpResponse&); + void Process(const HttpResponse&, uint64_t timestampMilliSec); - void AddDoneCallback(std::function&& callback); + void AddDoneCallback(std::function&& callback); void Cancel(); @@ -20,7 +20,11 @@ class PromFuture { PromFutureState mState = {PromFutureState::New}; ReadWriteLock mStateRWLock; - std::vector> mDoneCallbacks; + std::vector> mDoneCallbacks; + +#ifdef APSARA_UNIT_TEST_MAIN + friend class ScrapeSchedulerUnittest; +#endif }; } // namespace logtail \ No newline at end of file diff --git a/core/prometheus/async/PromHttpRequest.cpp b/core/prometheus/async/PromHttpRequest.cpp index 3f8b4c3512..e34a39e34a 100644 --- a/core/prometheus/async/PromHttpRequest.cpp +++ b/core/prometheus/async/PromHttpRequest.cpp @@ -23,7 +23,7 @@ PromHttpRequest::PromHttpRequest(const std::string& method, } void PromHttpRequest::OnSendDone(const HttpResponse& response) { - mFuture->Process(response); + mFuture->Process(response, mLastSendTime * 1000); } [[nodiscard]] bool PromHttpRequest::IsContextValid() const { diff --git a/core/prometheus/labels/TextParser.cpp b/core/prometheus/labels/TextParser.cpp index 8f3b0e33d0..dbdc4ffcf2 100644 --- a/core/prometheus/labels/TextParser.cpp +++ b/core/prometheus/labels/TextParser.cpp @@ -16,215 +16,335 @@ #include "prometheus/labels/TextParser.h" -#include - #include -#include +#include #include -#include -#include -#include #include #include "common/StringTools.h" #include "logger/Logger.h" #include "models/MetricEvent.h" +#include "models/PipelineEventGroup.h" +#include "models/StringView.h" #include "prometheus/Constants.h" +#include "prometheus/Utils.h" using namespace std; namespace logtail { -const std::string SAMPLE_RE = R"""(^(?P\w+)(\{(?P[^}]+)\})?\s+(?P\S+)(\s+(?P\S+))?)"""; - -PipelineEventGroup TextParser::Parse(const string& content) { - auto now = std::chrono::system_clock::now(); - auto duration_since_epoch = now.time_since_epoch(); - auto seconds_since_epoch = 
std::chrono::duration_cast(duration_since_epoch); - std::time_t defaultTsInSecs = seconds_since_epoch.count(); - return Parse(content, defaultTsInSecs, "", ""); -} - -bool TextParser::ParseLine(const string& line, MetricEvent& e, time_t defaultTsInSecs) { - string argName; - string argLabels; - string argUnwrappedLabels; - string argValue; - string argSuffix; - string argTimestamp; - if (RE2::FullMatch(line, - mSampleRegex, - RE2::Arg(&argName), - RE2::Arg(&argLabels), - RE2::Arg(&argUnwrappedLabels), - RE2::Arg(&argValue), - RE2::Arg(&argSuffix), - RE2::Arg(&argTimestamp)) - == false) { - return false; - } - - // skip any sample that has no name - if (argName.empty()) { - return false; - } - - // skip any sample that has a NaN value - double value = 0; - try { - value = stod(argValue); - } catch (const exception&) { - LOG_WARNING(sLogger, ("invalid value", argValue)("raw line", line)); - return false; - } - if (isnan(value)) { - return false; - } +bool IsValidNumberChar(char c) { + static const unordered_set sValidChars + = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-', '+', 'e', + 'E', 'I', 'N', 'F', 'T', 'Y', 'i', 'n', 'f', 't', 'y', 'X', 'x'}; + return sValidChars.count(c); +}; - // set timestamp to `defaultTsInSecs` if timestamp is empty, otherwise parse it - // if timestamp is not empty but not a valid integer, skip it - time_t timestamp = 0; - if (argTimestamp.empty()) { - timestamp = defaultTsInSecs; - } else { - try { - if (argTimestamp.length() > 3) { - timestamp = stol(argTimestamp.substr(0, argTimestamp.length() - 3)); - } else { - timestamp = 0; - } - } catch (const exception&) { - LOG_WARNING(sLogger, ("invalid value", argTimestamp)("raw line", line)); - return false; +PipelineEventGroup TextParser::Parse(const string& content, uint64_t defaultTimestamp, uint32_t defaultNanoTs) { + auto eGroup = PipelineEventGroup(make_shared()); + vector lines; + // pre-reserve vector size by 1024 which is experience value per line + 
lines.reserve(content.size() / 1024); + SplitStringView(content, '\n', lines); + for (const auto& line : lines) { + if (!IsValidMetric(line)) { + continue; + } + auto metricEvent = eGroup.CreateMetricEvent(); + if (ParseLine(line, defaultTimestamp, defaultNanoTs, *metricEvent)) { + eGroup.MutableEvents().emplace_back(std::move(metricEvent)); } } - e.SetName(argName); - e.SetTimestamp(timestamp); - e.SetValue(value); + return eGroup; +} - if (!argUnwrappedLabels.empty()) { - string kvPair; - istringstream iss(argUnwrappedLabels); - while (getline(iss, kvPair, ',')) { - kvPair = TrimString(kvPair); +PipelineEventGroup TextParser::BuildLogGroup(const string& content) { + PipelineEventGroup eGroup(std::make_shared()); - size_t equalsPos = kvPair.find('='); - if (equalsPos != string::npos) { - string key = kvPair.substr(0, equalsPos); - string value = kvPair.substr(equalsPos + 1); - value = TrimString(value, '\"', '\"'); - e.SetTag(key, value); - } + vector lines; + // pre-reserve vector size by 1024 which is experience value per line + lines.reserve(content.size() / 1024); + SplitStringView(content, '\n', lines); + for (const auto& line : lines) { + if (!IsValidMetric(line)) { + continue; } + auto* logEvent = eGroup.AddLogEvent(); + logEvent->SetContent(prometheus::PROMETHEUS, line); } - return true; + + return eGroup; } -PipelineEventGroup -TextParser::Parse(const string& content, const time_t defaultTsInSecs, const string& jobName, const string& instance) { - string line; - string argName, argLabels, argUnwrappedLabels, argValue, argSuffix, argTimestamp; - istringstream iss(content); - auto eGroup = PipelineEventGroup(make_shared()); - while (getline(iss, line)) { - // trim line - line = TrimString(line); +bool TextParser::ParseLine(StringView line, + uint64_t defaultTimestamp, + uint32_t defaultNanoTs, + MetricEvent& metricEvent) { + mLine = line; + mPos = 0; + mState = TextState::Start; + mLabelName.clear(); + mTokenLength = 0; + if (defaultTimestamp > 0) { + 
mTimestamp = defaultTimestamp; + mNanoTimestamp = defaultNanoTs; + } - // skip any empty line - if (line.empty()) { - continue; - } + HandleStart(metricEvent); - // skip any comment - if (line[0] == '#') { - continue; - } + if (mState == TextState::Done) { + return true; + } - // parse line - // for given sample R"""(test_metric{k1="v1", k2="v2"} 9.9410452992e+10 1715829785083)""" - // argName = "test_metric" - // argLabels = R"""({"k1="v1", k2="v2"})""" - // argUnwrappedLabels = R"""(k1="v1", k2="v2")""" - // argValue = "9.9410452992e+10" - // argSuffix = " 1715829785083" - // argTimestamp = "1715829785083" - if (RE2::FullMatch(line, - mSampleRegex, - RE2::Arg(&argName), - RE2::Arg(&argLabels), - RE2::Arg(&argUnwrappedLabels), - RE2::Arg(&argValue), - RE2::Arg(&argSuffix), - RE2::Arg(&argTimestamp)) - == false) { - continue; - } + return false; +} - // skip any sample that has no name - if (argName.empty()) { - continue; +// start to parse metric sample:test_metric{k1="v1", k2="v2" } 9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleStart(MetricEvent& metricEvent) { + SkipLeadingWhitespace(); + auto c = (mPos < mLine.size()) ? mLine[mPos] : '\0'; + if (std::isalpha(c) || c == '_' || c == ':') { + HandleMetricName(metricEvent); + } else { + HandleError("expected metric name"); + } +} + +// parse:test_metric{k1="v1", k2="v2" } 9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleMetricName(MetricEvent& metricEvent) { + char c = (mPos < mLine.size()) ? mLine[mPos] : '\0'; + while (std::isalpha(c) || c == '_' || c == ':' || std::isdigit(c)) { + ++mTokenLength; + ++mPos; + c = (mPos < mLine.size()) ? 
mLine[mPos] : '\0'; + } + metricEvent.SetNameNoCopy(mLine.substr(mPos - mTokenLength, mTokenLength)); + mTokenLength = 0; + SkipLeadingWhitespace(); + if (mPos < mLine.size()) { + if (mLine[mPos] == '{') { + ++mPos; + SkipLeadingWhitespace(); + HandleLabelName(metricEvent); + } else { + HandleSampleValue(metricEvent); } + } else { + HandleError("error end of metric name"); + } +} - // skip any sample that has a NaN value - double value = 0; - try { - value = stod(argValue); - } catch (const exception&) { - LOG_WARNING(sLogger, ("invalid value", argValue)("raw line", line)); - continue; +// parse:k1="v1", k2="v2" } 9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleLabelName(MetricEvent& metricEvent) { + char c = (mPos < mLine.size()) ? mLine[mPos] : '\0'; + if (std::isalpha(c) || c == '_') { + while (std::isalpha(c) || c == '_' || std::isdigit(c)) { + ++mTokenLength; + ++mPos; + c = (mPos < mLine.size()) ? mLine[mPos] : '\0'; } - if (isnan(value)) { - continue; + mLabelName = mLine.substr(mPos - mTokenLength, mTokenLength); + mTokenLength = 0; + SkipLeadingWhitespace(); + if (mPos == mLine.size() || mLine[mPos] != '=') { + HandleError("expected '=' after label name"); + return; } + ++mPos; + SkipLeadingWhitespace(); + HandleEqualSign(metricEvent); + } else if (c == '}') { + ++mPos; + SkipLeadingWhitespace(); + HandleSampleValue(metricEvent); + } else { + HandleError("invalid character in label name"); + } +} + +// parse:"v1", k2="v2" } 9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleEqualSign(MetricEvent& metricEvent) { + if (mPos < mLine.size() && mLine[mPos] == '"') { + ++mPos; + HandleLabelValue(metricEvent); + } else { + HandleError("expected '\"' after '='"); + } +} - // set timestamp to `defaultTsInSecs` if timestamp is empty, otherwise parse it - // if timestamp is not empty but not a valid integer, skip it - time_t timestamp = 0; - if (argTimestamp.empty()) { - timestamp = defaultTsInSecs; +// parse:v1", k2="v2" } 
9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleLabelValue(MetricEvent& metricEvent) { + // left quote has been consumed + // LabelValue supports escape char + bool escaped = false; + auto lPos = mPos; + while (mPos < mLine.size() && mLine[mPos] != '"') { + if (mLine[mPos] != '\\') { + if (escaped) { + mEscapedLabelValue.push_back(mLine[mPos]); + } + ++mPos; + ++mTokenLength; } else { - try { - if (argTimestamp.length() > 3) { - timestamp = stol(argTimestamp.substr(0, argTimestamp.length() - 3)); - } else { - timestamp = 0; - } - } catch (const exception&) { - LOG_WARNING(sLogger, ("invalid value", argTimestamp)("raw line", line)); - continue; + if (escaped == false) { + // first escape char encountered + escaped = true; + mEscapedLabelValue = mLine.substr(lPos, mPos - lPos).to_string(); } + if (mPos + 1 < mLine.size()) { + // check next char, if it is valid escape char, we can consume two chars and push one escaped char + // if not, we need to push the two chars + // valid escape char: \", \\, \n + switch (mLine[lPos + 1]) { + case '\\': + case '\"': + mEscapedLabelValue.push_back(mLine[mPos + 1]); + break; + case 'n': + mEscapedLabelValue.push_back('\n'); + break; + default: + mEscapedLabelValue.push_back('\\'); + mEscapedLabelValue.push_back(mLine[mPos + 1]); + break; } + mPos += 2; + } else { + mEscapedLabelValue.push_back(mLine[mPos + 1]); + ++mPos; } } - if (!jobName.empty()) { - e->SetTag(string(prometheus::JOB), jobName); - }
if (!instance.empty()) { - e->SetTag(prometheus::INSTANCE, instance); - } } - return eGroup; + if (mPos == mLine.size()) { + HandleError("unexpected end of input in label value"); + return; + } + + if (!escaped) { + metricEvent.SetTagNoCopy(mLabelName, mLine.substr(mPos - mTokenLength, mTokenLength)); + } else { + metricEvent.SetTag(mLabelName.to_string(), mEscapedLabelValue); + mEscapedLabelValue.clear(); + } + mTokenLength = 0; + ++mPos; + SkipLeadingWhitespace(); + if (mPos < mLine.size() && (mLine[mPos] == ',' || mLine[mPos] == '}')) { + HandleCommaOrCloseBrace(metricEvent); + } else { + HandleError("unexpected end of input in label value"); + } +} + +// parse:, k2="v2" } 9.9410452992e+10 1715829785083 # exemplarsxxx +// or parse:} 9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleCommaOrCloseBrace(MetricEvent& metricEvent) { + char c = (mPos < mLine.size()) ? mLine[mPos] : '\0'; + if (c == ',') { + ++mPos; + SkipLeadingWhitespace(); + HandleLabelName(metricEvent); + } else if (c == '}') { + ++mPos; + SkipLeadingWhitespace(); + HandleSampleValue(metricEvent); + } else { + HandleError("expected ',' or '}' after label value"); + } +} + +// parse:9.9410452992e+10 1715829785083 # exemplarsxxx +void TextParser::HandleSampleValue(MetricEvent& metricEvent) { + while (mPos < mLine.size() && IsValidNumberChar(mLine[mPos])) { + ++mPos; + ++mTokenLength; + } + + if (mPos < mLine.size() && mLine[mPos] != ' ' && mLine[mPos] != '\t') { + HandleError("unexpected end of input in sample value"); + return; + } + + auto tmpSampleValue = mLine.substr(mPos - mTokenLength, mTokenLength); + mDoubleStr = tmpSampleValue.to_string(); + + try { + mSampleValue = std::stod(mDoubleStr); + } catch (...) 
{ + HandleError("invalid sample value"); + mTokenLength = 0; + return; + } + mDoubleStr.clear(); + + metricEvent.SetValue(mSampleValue); + mTokenLength = 0; + SkipLeadingWhitespace(); + if (mPos == mLine.size()) { + metricEvent.SetTimestamp(mTimestamp, mNanoTimestamp); + mState = TextState::Done; + } else { + HandleTimestamp(metricEvent); + } +} + +// parse:1715829785083 # exemplarsxxx +// timestamp will be 1715829785.083 in OpenMetrics +void TextParser::HandleTimestamp(MetricEvent& metricEvent) { + // '#' is for exemplars, and we don't need it + while (mPos < mLine.size() && IsValidNumberChar(mLine[mPos])) { + ++mPos; + ++mTokenLength; + } + if (mPos < mLine.size() && mLine[mPos] != ' ' && mLine[mPos] != '\t' && mLine[mPos] != '#') { + HandleError("unexpected end of input in sample timestamp"); + return; + } + + auto tmpTimestamp = mLine.substr(mPos - mTokenLength, mTokenLength); + if (tmpTimestamp.size() == 0) { + mState = TextState::Done; + return; + } + mDoubleStr = tmpTimestamp.to_string(); + double milliTimestamp = 0; + try { + milliTimestamp = stod(mDoubleStr); + } catch (...) 
{ + HandleError("invalid timestamp"); + mTokenLength = 0; + return; + } + mDoubleStr.clear(); + + if (milliTimestamp > 1ULL << 63) { + HandleError("timestamp overflow"); + mTokenLength = 0; + return; + } + if (milliTimestamp < 1UL << 31) { + milliTimestamp *= 1000; + } + time_t timestamp = (int64_t)milliTimestamp / 1000; + auto ns = ((int64_t)milliTimestamp % 1000) * 1000000; + metricEvent.SetTimestamp(timestamp, ns); + + mTokenLength = 0; + + mState = TextState::Done; +} + +void TextParser::HandleError(const string& errMsg) { + LOG_WARNING(sLogger, ("text parser error parsing line", mLine.to_string() + errMsg)); + mState = TextState::Error; +} + +inline void TextParser::SkipLeadingWhitespace() { + while (mPos < mLine.length() && (mLine[mPos] == ' ' || mLine[mPos] == '\t')) { + mPos++; + } } } // namespace logtail diff --git a/core/prometheus/labels/TextParser.h b/core/prometheus/labels/TextParser.h index dde76ef576..b0f158248e 100644 --- a/core/prometheus/labels/TextParser.h +++ b/core/prometheus/labels/TextParser.h @@ -16,28 +16,51 @@ #pragma once -#include - #include +#include "models/MetricEvent.h" #include "models/PipelineEventGroup.h" namespace logtail { -extern const std::string SAMPLE_RE; +enum class TextState { Start, Done, Error }; class TextParser { public: - TextParser() : mSampleRegex(SAMPLE_RE) {} - PipelineEventGroup Parse(const std::string& content); + TextParser() = default; + PipelineEventGroup Parse(const std::string& content, uint64_t defaultTimestamp, uint32_t defaultNanoTs); + PipelineEventGroup BuildLogGroup(const std::string& content); - PipelineEventGroup - Parse(const std::string& content, std::time_t defaultTs, const std::string& jobName, const std::string& instance); - bool ParseLine(const std::string& line, MetricEvent& e, time_t defaultTs); + bool ParseLine(StringView line, uint64_t defaultTimestamp, uint32_t defaultNanoTs, MetricEvent& metricEvent); private: - RE2 mSampleRegex; + void HandleError(const std::string& errMsg); + + void 
HandleStart(MetricEvent& metricEvent); + void HandleMetricName(MetricEvent& metricEvent); + void HandleOpenBrace(MetricEvent& metricEvent); + void HandleLabelName(MetricEvent& metricEvent); + void HandleEqualSign(MetricEvent& metricEvent); + void HandleLabelValue(MetricEvent& metricEvent); + void HandleCommaOrCloseBrace(MetricEvent& metricEvent); + void HandleSampleValue(MetricEvent& metricEvent); + void HandleTimestamp(MetricEvent& metricEvent); + void HandleSpace(MetricEvent& metricEvent); + + inline void SkipLeadingWhitespace(); + + TextState mState{TextState::Start}; + StringView mLine; + std::size_t mPos{0}; + + StringView mLabelName; + std::string mEscapedLabelValue; + double mSampleValue{0.0}; + time_t mTimestamp{0}; + uint32_t mNanoTimestamp{0}; + std::size_t mTokenLength{0}; + std::string mDoubleStr; #ifdef APSARA_UNIT_TEST_MAIN friend class TextParserUnittest; diff --git a/core/prometheus/schedulers/ScrapeConfig.cpp b/core/prometheus/schedulers/ScrapeConfig.cpp index 7d7f74870e..177c9a7e27 100644 --- a/core/prometheus/schedulers/ScrapeConfig.cpp +++ b/core/prometheus/schedulers/ScrapeConfig.cpp @@ -10,15 +10,16 @@ #include "logger/Logger.h" #include "prometheus/Constants.h" #include "prometheus/Utils.h" +#include "sdk/Common.h" using namespace std; namespace logtail { ScrapeConfig::ScrapeConfig() - : mScheme("http"), - mMetricsPath("/metrics"), - mScrapeIntervalSeconds(60), + : mScrapeIntervalSeconds(60), mScrapeTimeoutSeconds(10), + mMetricsPath("/metrics"), + mScheme("http"), mMaxScrapeSizeBytes(-1), mSampleLimit(-1), mSeriesLimit(-1) { @@ -33,12 +34,7 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { } else { return false; } - if (scrapeConfig.isMember(prometheus::SCHEME) && scrapeConfig[prometheus::SCHEME].isString()) { - mScheme = scrapeConfig[prometheus::SCHEME].asString(); - } - if (scrapeConfig.isMember(prometheus::METRICS_PATH) && scrapeConfig[prometheus::METRICS_PATH].isString()) { - mMetricsPath = 
scrapeConfig[prometheus::METRICS_PATH].asString(); - } + if (scrapeConfig.isMember(prometheus::SCRAPE_INTERVAL) && scrapeConfig[prometheus::SCRAPE_INTERVAL].isString()) { string tmpScrapeIntervalString = scrapeConfig[prometheus::SCRAPE_INTERVAL].asString(); mScrapeIntervalSeconds = DurationToSecond(tmpScrapeIntervalString); @@ -47,6 +43,32 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { string tmpScrapeTimeoutString = scrapeConfig[prometheus::SCRAPE_TIMEOUT].asString(); mScrapeTimeoutSeconds = DurationToSecond(tmpScrapeTimeoutString); } + if (scrapeConfig.isMember(prometheus::METRICS_PATH) && scrapeConfig[prometheus::METRICS_PATH].isString()) { + mMetricsPath = scrapeConfig[prometheus::METRICS_PATH].asString(); + } + if (scrapeConfig.isMember(prometheus::SCHEME) && scrapeConfig[prometheus::SCHEME].isString()) { + mScheme = scrapeConfig[prometheus::SCHEME].asString(); + } + + // basic auth, authorization, oauth2 + // basic auth, authorization, oauth2 cannot be used at the same time + if ((int)scrapeConfig.isMember(prometheus::BASIC_AUTH) + scrapeConfig.isMember(prometheus::AUTHORIZATION) > 1) { + LOG_ERROR(sLogger, ("basic auth and authorization cannot be used at the same time", "")); + return false; + } + if (scrapeConfig.isMember(prometheus::BASIC_AUTH) && scrapeConfig[prometheus::BASIC_AUTH].isObject()) { + if (!InitBasicAuth(scrapeConfig[prometheus::BASIC_AUTH])) { + LOG_ERROR(sLogger, ("basic auth config error", "")); + return false; + } + } + if (scrapeConfig.isMember(prometheus::AUTHORIZATION) && scrapeConfig[prometheus::AUTHORIZATION].isObject()) { + if (!InitAuthorization(scrapeConfig[prometheus::AUTHORIZATION])) { + LOG_ERROR(sLogger, ("authorization config error", "")); + return false; + } + } + // : a size in bytes, e.g. 512MB. A unit is required. Supported units: B, KB, MB, GB, TB, PB, EB. 
if (scrapeConfig.isMember(prometheus::MAX_SCRAPE_SIZE) && scrapeConfig[prometheus::MAX_SCRAPE_SIZE].isString()) { string tmpMaxScrapeSize = scrapeConfig[prometheus::MAX_SCRAPE_SIZE].asString(); @@ -104,20 +126,6 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { } } - if (scrapeConfig.isMember(prometheus::AUTHORIZATION) && scrapeConfig[prometheus::AUTHORIZATION].isObject()) { - string type = scrapeConfig[prometheus::AUTHORIZATION][prometheus::TYPE].asString(); - string bearerToken; - bool b - = ReadFile(scrapeConfig[prometheus::AUTHORIZATION][prometheus::CREDENTIALS_FILE].asString(), bearerToken); - if (!b) { - LOG_ERROR(sLogger, - ("read credentials_file failed, credentials_file", - scrapeConfig[prometheus::AUTHORIZATION][prometheus::CREDENTIALS_FILE].asString())); - return false; - } - mHeaders[prometheus::A_UTHORIZATION] = type + " " + bearerToken; - } - for (const auto& relabelConfig : scrapeConfig[prometheus::RELABEL_CONFIGS]) { mRelabelConfigs.emplace_back(relabelConfig); } @@ -138,4 +146,81 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { return true; } + +bool ScrapeConfig::InitBasicAuth(const Json::Value& basicAuth) { + string username; + string usernameFile; + string password; + string passwordFile; + if (basicAuth.isMember(prometheus::USERNAME) && basicAuth[prometheus::USERNAME].isString()) { + username = basicAuth[prometheus::USERNAME].asString(); + } + if (basicAuth.isMember(prometheus::USERNAME_FILE) && basicAuth[prometheus::USERNAME_FILE].isString()) { + usernameFile = basicAuth[prometheus::USERNAME_FILE].asString(); + } + if (basicAuth.isMember(prometheus::PASSWORD) && basicAuth[prometheus::PASSWORD].isString()) { + password = basicAuth[prometheus::PASSWORD].asString(); + } + if (basicAuth.isMember(prometheus::PASSWORD_FILE) && basicAuth[prometheus::PASSWORD_FILE].isString()) { + passwordFile = basicAuth[prometheus::PASSWORD_FILE].asString(); + } + + if ((username.empty() && usernameFile.empty()) || (password.empty() && 
passwordFile.empty())) { + LOG_ERROR(sLogger, ("basic auth username or password is empty", "")); + return false; + } + if ((!username.empty() && !usernameFile.empty()) || (!password.empty() && !passwordFile.empty())) { + LOG_ERROR(sLogger, ("basic auth config error", "")); + return false; + } + if (!usernameFile.empty() && !ReadFile(usernameFile, username)) { + LOG_ERROR(sLogger, ("read username_file failed, username_file", usernameFile)); + return false; + } + + if (!passwordFile.empty() && !ReadFile(passwordFile, password)) { + LOG_ERROR(sLogger, ("read password_file failed, password_file", passwordFile)); + return false; + } + + auto token = username + ":" + password; + auto token64 = sdk::Base64Enconde(token); + mAuthHeaders[prometheus::A_UTHORIZATION] = prometheus::BASIC_PREFIX + token64; + return true; +} + +bool ScrapeConfig::InitAuthorization(const Json::Value& authorization) { + string type; + string credentials; + string credentialsFile; + + if (authorization.isMember(prometheus::TYPE) && authorization[prometheus::TYPE].isString()) { + type = authorization[prometheus::TYPE].asString(); + } + // if not set, use default type Bearer + if (type.empty()) { + type = prometheus::AUTHORIZATION_DEFAULT_TYEP; + } + + if (authorization.isMember(prometheus::CREDENTIALS) && authorization[prometheus::CREDENTIALS].isString()) { + credentials = authorization[prometheus::CREDENTIALS].asString(); + } + if (authorization.isMember(prometheus::CREDENTIALS_FILE) + && authorization[prometheus::CREDENTIALS_FILE].isString()) { + credentialsFile = authorization[prometheus::CREDENTIALS_FILE].asString(); + } + if (!credentials.empty() && !credentialsFile.empty()) { + LOG_ERROR(sLogger, ("authorization config error", "")); + return false; + } + + if (!credentialsFile.empty() && !ReadFile(credentialsFile, credentials)) { + LOG_ERROR(sLogger, ("authorization read file error", "")); + return false; + } + + mAuthHeaders[prometheus::A_UTHORIZATION] = type + " " + credentials; + return 
true; +} + } // namespace logtail \ No newline at end of file diff --git a/core/prometheus/schedulers/ScrapeConfig.h b/core/prometheus/schedulers/ScrapeConfig.h index 3c8294f11a..4f8e866cbf 100644 --- a/core/prometheus/schedulers/ScrapeConfig.h +++ b/core/prometheus/schedulers/ScrapeConfig.h @@ -11,28 +11,36 @@ namespace logtail { + class ScrapeConfig { public: std::string mJobName; - std::string mScheme; - std::string mMetricsPath; int64_t mScrapeIntervalSeconds; int64_t mScrapeTimeoutSeconds; + std::string mMetricsPath; + std::string mScheme; + + std::map mAuthHeaders; + int64_t mMaxScrapeSizeBytes; int64_t mSampleLimit; int64_t mSeriesLimit; std::vector mRelabelConfigs; std::map> mParams; - std::map mHeaders; std::string mQueryString; ScrapeConfig(); bool Init(const Json::Value& config); +private: + bool InitBasicAuth(const Json::Value& basicAuth); + bool InitAuthorization(const Json::Value& authorization); + #ifdef APSARA_UNIT_TEST_MAIN friend class ScrapeConfigUnittest; #endif }; + } // namespace logtail \ No newline at end of file diff --git a/core/prometheus/schedulers/ScrapeScheduler.cpp b/core/prometheus/schedulers/ScrapeScheduler.cpp index b1aa779a8c..30f8f601d0 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.cpp +++ b/core/prometheus/schedulers/ScrapeScheduler.cpp @@ -31,9 +31,9 @@ #include "logger/Logger.h" #include "prometheus/Constants.h" #include "prometheus/async/PromHttpRequest.h" -#include "queue/ProcessQueueItem.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/ProcessQueueItem.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKey.h" using namespace std; @@ -55,43 +55,42 @@ ScrapeScheduler::ScrapeScheduler(std::shared_ptr scrapeConfigPtr, + mScrapeConfigPtr->mMetricsPath + (mScrapeConfigPtr->mQueryString.empty() ? "" : "?" 
+ mScrapeConfigPtr->mQueryString); mHash = mScrapeConfigPtr->mJobName + tmpTargetURL + ToString(mLabels.Hash()); + mInstance = mHost + ":" + ToString(mPort); mInterval = mScrapeConfigPtr->mScrapeIntervalSeconds; -} -bool ScrapeScheduler::operator<(const ScrapeScheduler& other) const { - return mHash < other.mHash; + mParser = make_unique(); } -void ScrapeScheduler::OnMetricResult(const HttpResponse& response) { - // TODO(liqiang): get scrape timestamp - time_t timestamp = time(nullptr); +void ScrapeScheduler::OnMetricResult(const HttpResponse& response, uint64_t timestampMilliSec) { + mScrapeTimestampMilliSec = timestampMilliSec; + mScrapeDurationSeconds = 1.0 * (GetCurrentTimeInMilliSeconds() - timestampMilliSec) / 1000; + mScrapeResponseSizeBytes = response.mBody.size(); + mUpState = response.mStatusCode == 200; if (response.mStatusCode != 200) { + mScrapeResponseSizeBytes = 0; string headerStr; - for (const auto& [k, v] : mScrapeConfigPtr->mHeaders) { + for (const auto& [k, v] : mScrapeConfigPtr->mAuthHeaders) { headerStr.append(k).append(":").append(v).append(";"); } LOG_WARNING(sLogger, ("scrape failed, status code", response.mStatusCode)("target", mHash)("http header", headerStr)); - return; } - auto eventGroup = BuildPipelineEventGroup(response.mBody, timestamp); + auto eventGroup = BuildPipelineEventGroup(response.mBody); + SetAutoMetricMeta(eventGroup); PushEventGroup(std::move(eventGroup)); } -PipelineEventGroup ScrapeScheduler::BuildPipelineEventGroup(const std::string& content, time_t timestamp) { - PipelineEventGroup eGroup(std::make_shared()); - for (const auto& line : SplitString(content, "\r\n")) { - auto newLine = TrimString(line); - if (newLine.empty() || newLine[0] == '#') { - continue; - } - auto* logEvent = eGroup.AddLogEvent(); - logEvent->SetContent(prometheus::PROMETHEUS, newLine); - logEvent->SetTimestamp(timestamp); - } +void ScrapeScheduler::SetAutoMetricMeta(PipelineEventGroup& eGroup) { + 
eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, ToString(mScrapeTimestampMilliSec)); + eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION, ToString(mScrapeDurationSeconds)); + eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE, ToString(mScrapeResponseSizeBytes)); + eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE, mInstance); + eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE, ToString(mUpState)); +} - return eGroup; +PipelineEventGroup ScrapeScheduler::BuildPipelineEventGroup(const std::string& content) { + return mParser->BuildLogGroup(content); } void ScrapeScheduler::PushEventGroup(PipelineEventGroup&& eGroup) { @@ -108,8 +107,8 @@ string ScrapeScheduler::GetId() const { void ScrapeScheduler::ScheduleNext() { auto future = std::make_shared(); - future->AddDoneCallback([this](const HttpResponse& response) { - this->OnMetricResult(response); + future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampMilliSec) { + this->OnMetricResult(response, timestampMilliSec); this->ExecDone(); this->ScheduleNext(); }); @@ -130,7 +129,9 @@ void ScrapeScheduler::ScheduleNext() { void ScrapeScheduler::ScrapeOnce(std::chrono::steady_clock::time_point execTime) { auto future = std::make_shared(); - future->AddDoneCallback([this](const HttpResponse& response) { this->OnMetricResult(response); }); + future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampMilliSec) { + this->OnMetricResult(response, timestampMilliSec); + }); mFuture = future; auto event = BuildScrapeTimerEvent(execTime); if (mTimer) { @@ -145,7 +146,7 @@ std::unique_ptr ScrapeScheduler::BuildScrapeTimerEvent(std::chrono:: mPort, mScrapeConfigPtr->mMetricsPath, mScrapeConfigPtr->mQueryString, - mScrapeConfigPtr->mHeaders, + mScrapeConfigPtr->mAuthHeaders, "", mScrapeConfigPtr->mScrapeTimeoutSeconds, mScrapeConfigPtr->mScrapeIntervalSeconds @@ -170,10 +171,9 @@ uint64_t 
ScrapeScheduler::GetRandSleep() const { uint64_t h = XXH64(key.c_str(), key.length(), 0); uint64_t randSleep = ((double)1.0) * mScrapeConfigPtr->mScrapeIntervalSeconds * (1.0 * h / (double)0xFFFFFFFFFFFFFFFF); - uint64_t sleepOffset - = GetCurrentTimeInNanoSeconds() % (mScrapeConfigPtr->mScrapeIntervalSeconds * 1000ULL * 1000ULL * 1000ULL); + uint64_t sleepOffset = GetCurrentTimeInMilliSeconds() % (mScrapeConfigPtr->mScrapeIntervalSeconds * 1000ULL); if (randSleep < sleepOffset) { - randSleep += mScrapeConfigPtr->mScrapeIntervalSeconds * 1000ULL * 1000ULL * 1000ULL; + randSleep += mScrapeConfigPtr->mScrapeIntervalSeconds * 1000ULL; } randSleep -= sleepOffset; return randSleep; diff --git a/core/prometheus/schedulers/ScrapeScheduler.h b/core/prometheus/schedulers/ScrapeScheduler.h index bcabc5b99e..d4b191443f 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.h +++ b/core/prometheus/schedulers/ScrapeScheduler.h @@ -23,11 +23,12 @@ #include "common/http/HttpResponse.h" #include "common/timer/Timer.h" #include "models/PipelineEventGroup.h" +#include "prometheus/labels/TextParser.h" #include "prometheus/schedulers/ScrapeConfig.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" #ifdef APSARA_UNIT_TEST_MAIN -#include "queue/ProcessQueueItem.h" +#include "pipeline/queue/ProcessQueueItem.h" #endif namespace logtail { @@ -43,9 +44,7 @@ class ScrapeScheduler : public BaseScheduler { ScrapeScheduler(const ScrapeScheduler&) = default; ~ScrapeScheduler() override = default; - bool operator<(const ScrapeScheduler& other) const; - - void OnMetricResult(const HttpResponse&); + void OnMetricResult(const HttpResponse&, uint64_t timestampMilliSec); void SetTimer(std::shared_ptr timer); std::string GetId() const; @@ -58,8 +57,9 @@ class ScrapeScheduler : public BaseScheduler { private: void PushEventGroup(PipelineEventGroup&&); + void SetAutoMetricMeta(PipelineEventGroup& eGroup); - PipelineEventGroup BuildPipelineEventGroup(const std::string& content, time_t 
timestampNs); + PipelineEventGroup BuildPipelineEventGroup(const std::string& content); std::unique_ptr BuildScrapeTimerEvent(std::chrono::steady_clock::time_point execTime); @@ -68,11 +68,20 @@ class ScrapeScheduler : public BaseScheduler { std::string mHash; std::string mHost; int32_t mPort; + std::string mInstance; Labels mLabels; + std::unique_ptr mParser; + QueueKey mQueueKey; size_t mInputIndex; std::shared_ptr mTimer; + + // auto metrics + uint64_t mScrapeTimestampMilliSec = 0; + double mScrapeDurationSeconds = 0; + uint64_t mScrapeResponseSizeBytes = 0; + bool mUpState = true; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParsePrometheusMetricUnittest; friend class ScrapeSchedulerUnittest; diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp index 3dde1a82c5..4539a433db 100644 --- a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp +++ b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp @@ -58,7 +58,7 @@ bool TargetSubscriberScheduler::operator<(const TargetSubscriberScheduler& other return mJobName < other.mJobName; } -void TargetSubscriberScheduler::OnSubscription(const HttpResponse& response) { +void TargetSubscriberScheduler::OnSubscription(const HttpResponse& response, uint64_t) { if (response.mStatusCode == 304) { // not modified return; @@ -104,12 +104,12 @@ void TargetSubscriberScheduler::UpdateScrapeScheduler( if (mTimer) { // zero-cost upgrade if (mUnRegisterMs > 0 - && (GetCurrentTimeInNanoSeconds() + v->GetRandSleep() - - (uint64_t)mScrapeConfigPtr->mScrapeIntervalSeconds * 1000000000 - > mUnRegisterMs * 1000000) - && (GetCurrentTimeInNanoSeconds() + v->GetRandSleep() - - (uint64_t)mScrapeConfigPtr->mScrapeIntervalSeconds * 1000000000 * 2 - < mUnRegisterMs * 1000000)) { + && (GetCurrentTimeInMilliSeconds() + v->GetRandSleep() + - (uint64_t)mScrapeConfigPtr->mScrapeIntervalSeconds * 1000 + > mUnRegisterMs) + && (GetCurrentTimeInMilliSeconds() + 
v->GetRandSleep() + - (uint64_t)mScrapeConfigPtr->mScrapeIntervalSeconds * 1000 * 2 + < mUnRegisterMs)) { // scrape once just now v->ScrapeOnce(std::chrono::steady_clock::now()); } @@ -213,7 +213,7 @@ TargetSubscriberScheduler::BuildScrapeSchedulerSet(std::vector& targetGr scrapeScheduler->SetTimer(mTimer); auto firstExecTime - = std::chrono::steady_clock::now() + std::chrono::nanoseconds(scrapeScheduler->GetRandSleep()); + = std::chrono::steady_clock::now() + std::chrono::milliseconds(scrapeScheduler->GetRandSleep()); scrapeScheduler->SetFirstExecTime(firstExecTime); @@ -232,8 +232,8 @@ string TargetSubscriberScheduler::GetId() const { void TargetSubscriberScheduler::ScheduleNext() { auto future = std::make_shared(); - future->AddDoneCallback([this](const HttpResponse& response) { - this->OnSubscription(response); + future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampMilliSec) { + this->OnSubscription(response, timestampMilliSec); this->ExecDone(); this->ScheduleNext(); }); diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.h b/core/prometheus/schedulers/TargetSubscriberScheduler.h index 2a4b02448d..814d0f80aa 100644 --- a/core/prometheus/schedulers/TargetSubscriberScheduler.h +++ b/core/prometheus/schedulers/TargetSubscriberScheduler.h @@ -27,7 +27,7 @@ #include "prometheus/schedulers/BaseScheduler.h" #include "prometheus/schedulers/ScrapeConfig.h" #include "prometheus/schedulers/ScrapeScheduler.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { @@ -40,7 +40,7 @@ class TargetSubscriberScheduler : public BaseScheduler { bool Init(const Json::Value& scrapeConfig); bool operator<(const TargetSubscriberScheduler& other) const; - void OnSubscription(const HttpResponse&); + void OnSubscription(const HttpResponse&, uint64_t); void SetTimer(std::shared_ptr timer); std::string GetId() const; @@ -63,7 +63,8 @@ class TargetSubscriberScheduler : public BaseScheduler { private: bool 
ParseScrapeSchedulerGroup(const std::string& content, std::vector& scrapeSchedulerGroup); - std::unordered_map> BuildScrapeSchedulerSet(std::vector& scrapeSchedulerGroup); + std::unordered_map> + BuildScrapeSchedulerSet(std::vector& scrapeSchedulerGroup); std::unique_ptr BuildSubscriberTimerEvent(std::chrono::steady_clock::time_point execTime); void UpdateScrapeScheduler(std::unordered_map>&); diff --git a/core/config_server_pb/v1/agent.pb.cc b/core/protobuf/config_server/v1/agent.pb.cc similarity index 100% rename from core/config_server_pb/v1/agent.pb.cc rename to core/protobuf/config_server/v1/agent.pb.cc diff --git a/core/config_server_pb/v1/agent.pb.h b/core/protobuf/config_server/v1/agent.pb.h similarity index 100% rename from core/config_server_pb/v1/agent.pb.h rename to core/protobuf/config_server/v1/agent.pb.h diff --git a/core/config_server_pb/v2/agent.pb.cc b/core/protobuf/config_server/v2/agent.pb.cc similarity index 93% rename from core/config_server_pb/v2/agent.pb.cc rename to core/protobuf/config_server/v2/agent.pb.cc index 6893c169c8..46a430cbf7 100644 --- a/core/config_server_pb/v2/agent.pb.cc +++ b/core/protobuf/config_server/v2/agent.pb.cc @@ -38,9 +38,9 @@ extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf:: extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_AgentGroupTag; extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_CommandDetail; extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_CommandInfo; +extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_CommonResponse; extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_ConfigDetail; extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto 
::google::protobuf::internal::SCCInfo<0> scc_info_ConfigInfo; -extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<0> scc_info_ServerErrorResponse; extern PROTOBUF_INTERNAL_EXPORT_protobuf_v2_2fagent_2eproto ::google::protobuf::internal::SCCInfo<1> scc_info_AgentAttributes; } // namespace protobuf_v2_2fagent_2eproto namespace configserver { @@ -86,11 +86,6 @@ class CommandDetailDefaultTypeInternal { ::google::protobuf::internal::ExplicitlyConstructed _instance; } _CommandDetail_default_instance_; -class ServerErrorResponseDefaultTypeInternal { - public: - ::google::protobuf::internal::ExplicitlyConstructed - _instance; -} _ServerErrorResponse_default_instance_; class HeartbeatResponseDefaultTypeInternal { public: ::google::protobuf::internal::ExplicitlyConstructed @@ -106,6 +101,11 @@ class FetchConfigResponseDefaultTypeInternal { ::google::protobuf::internal::ExplicitlyConstructed _instance; } _FetchConfigResponse_default_instance_; +class CommonResponseDefaultTypeInternal { + public: + ::google::protobuf::internal::ExplicitlyConstructed + _instance; +} _CommonResponse_default_instance_; } // namespace v2 } // namespace proto } // namespace configserver @@ -226,20 +226,6 @@ static void InitDefaultsCommandDetail() { ::google::protobuf::internal::SCCInfo<0> scc_info_CommandDetail = {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsCommandDetail}, {}}; -static void InitDefaultsServerErrorResponse() { - GOOGLE_PROTOBUF_VERIFY_VERSION; - - { - void* ptr = &::configserver::proto::v2::_ServerErrorResponse_default_instance_; - new (ptr) ::configserver::proto::v2::ServerErrorResponse(); - ::google::protobuf::internal::OnShutdownDestroyMessage(ptr); - } - ::configserver::proto::v2::ServerErrorResponse::InitAsDefaultInstance(); -} - -::google::protobuf::internal::SCCInfo<0> scc_info_ServerErrorResponse = - {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, 
InitDefaultsServerErrorResponse}, {}}; - static void InitDefaultsHeartbeatResponse() { GOOGLE_PROTOBUF_VERIFY_VERSION; @@ -253,7 +239,7 @@ static void InitDefaultsHeartbeatResponse() { ::google::protobuf::internal::SCCInfo<3> scc_info_HeartbeatResponse = {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 3, InitDefaultsHeartbeatResponse}, { - &protobuf_v2_2fagent_2eproto::scc_info_ServerErrorResponse.base, + &protobuf_v2_2fagent_2eproto::scc_info_CommonResponse.base, &protobuf_v2_2fagent_2eproto::scc_info_ConfigDetail.base, &protobuf_v2_2fagent_2eproto::scc_info_CommandDetail.base,}}; @@ -285,9 +271,23 @@ static void InitDefaultsFetchConfigResponse() { ::google::protobuf::internal::SCCInfo<2> scc_info_FetchConfigResponse = {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsFetchConfigResponse}, { - &protobuf_v2_2fagent_2eproto::scc_info_ServerErrorResponse.base, + &protobuf_v2_2fagent_2eproto::scc_info_CommonResponse.base, &protobuf_v2_2fagent_2eproto::scc_info_ConfigDetail.base,}}; +static void InitDefaultsCommonResponse() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::configserver::proto::v2::_CommonResponse_default_instance_; + new (ptr) ::configserver::proto::v2::CommonResponse(); + ::google::protobuf::internal::OnShutdownDestroyMessage(ptr); + } + ::configserver::proto::v2::CommonResponse::InitAsDefaultInstance(); +} + +::google::protobuf::internal::SCCInfo<0> scc_info_CommonResponse = + {{ATOMIC_VAR_INIT(::google::protobuf::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsCommonResponse}, {}}; + void InitDefaults() { ::google::protobuf::internal::InitSCC(&scc_info_AgentGroupTag.base); ::google::protobuf::internal::InitSCC(&scc_info_ConfigInfo.base); @@ -297,10 +297,10 @@ void InitDefaults() { ::google::protobuf::internal::InitSCC(&scc_info_HeartbeatRequest.base); ::google::protobuf::internal::InitSCC(&scc_info_ConfigDetail.base); 
::google::protobuf::internal::InitSCC(&scc_info_CommandDetail.base); - ::google::protobuf::internal::InitSCC(&scc_info_ServerErrorResponse.base); ::google::protobuf::internal::InitSCC(&scc_info_HeartbeatResponse.base); ::google::protobuf::internal::InitSCC(&scc_info_FetchConfigRequest.base); ::google::protobuf::internal::InitSCC(&scc_info_FetchConfigResponse.base); + ::google::protobuf::internal::InitSCC(&scc_info_CommonResponse.base); } ::google::protobuf::Metadata file_level_metadata[12]; @@ -365,7 +365,7 @@ const ::google::protobuf::uint32 TableStruct::offsets[] GOOGLE_PROTOBUF_ATTRIBUT GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, running_status_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, startup_time_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, pipeline_configs_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, process_configs_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, instance_configs_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, custom_commands_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, flags_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatRequest, opaque_), @@ -387,22 +387,15 @@ const ::google::protobuf::uint32 TableStruct::offsets[] GOOGLE_PROTOBUF_ATTRIBUT GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::CommandDetail, detail_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::CommandDetail, expire_time_), ~0u, // no _has_bits_ - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::ServerErrorResponse, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - 
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::ServerErrorResponse, error_code_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::ServerErrorResponse, error_message_), - ~0u, // no _has_bits_ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, request_id_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, error_response_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, commonresponse_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, capabilities_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, pipeline_config_updates_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, process_config_updates_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, instance_config_updates_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, custom_command_updates_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, flags_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::HeartbeatResponse, opaque_), @@ -420,8 +413,15 @@ const ::google::protobuf::uint32 TableStruct::offsets[] GOOGLE_PROTOBUF_ATTRIBUT ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::FetchConfigResponse, request_id_), - GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::FetchConfigResponse, error_response_), + 
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::FetchConfigResponse, commonresponse_), GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::FetchConfigResponse, config_details_), + ~0u, // no _has_bits_ + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::CommonResponse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::CommonResponse, status_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(::configserver::proto::v2::CommonResponse, errormessage_), }; static const ::google::protobuf::internal::MigrationSchema schemas[] GOOGLE_PROTOBUF_ATTRIBUTE_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::configserver::proto::v2::AgentGroupTag)}, @@ -432,10 +432,10 @@ static const ::google::protobuf::internal::MigrationSchema schemas[] GOOGLE_PROT { 43, -1, sizeof(::configserver::proto::v2::HeartbeatRequest)}, { 62, -1, sizeof(::configserver::proto::v2::ConfigDetail)}, { 70, -1, sizeof(::configserver::proto::v2::CommandDetail)}, - { 79, -1, sizeof(::configserver::proto::v2::ServerErrorResponse)}, - { 86, -1, sizeof(::configserver::proto::v2::HeartbeatResponse)}, - { 99, -1, sizeof(::configserver::proto::v2::FetchConfigRequest)}, - { 107, -1, sizeof(::configserver::proto::v2::FetchConfigResponse)}, + { 79, -1, sizeof(::configserver::proto::v2::HeartbeatResponse)}, + { 92, -1, sizeof(::configserver::proto::v2::FetchConfigRequest)}, + { 100, -1, sizeof(::configserver::proto::v2::FetchConfigResponse)}, + { 108, -1, sizeof(::configserver::proto::v2::CommonResponse)}, }; static ::google::protobuf::Message const * const file_default_instances[] = { @@ -447,10 +447,10 @@ static ::google::protobuf::Message const * const file_default_instances[] = { reinterpret_cast(&::configserver::proto::v2::_HeartbeatRequest_default_instance_), 
reinterpret_cast(&::configserver::proto::v2::_ConfigDetail_default_instance_), reinterpret_cast(&::configserver::proto::v2::_CommandDetail_default_instance_), - reinterpret_cast(&::configserver::proto::v2::_ServerErrorResponse_default_instance_), reinterpret_cast(&::configserver::proto::v2::_HeartbeatResponse_default_instance_), reinterpret_cast(&::configserver::proto::v2::_FetchConfigRequest_default_instance_), reinterpret_cast(&::configserver::proto::v2::_FetchConfigResponse_default_instance_), + reinterpret_cast(&::configserver::proto::v2::_CommonResponse_default_instance_), }; void protobuf_AssignDescriptors() { @@ -486,7 +486,7 @@ void AddDescriptorsImpl() { "(\014\022\020\n\010hostname\030\003 \001(\014\022B\n\006extras\030d \003(\01322.c" "onfigserver.proto.v2.AgentAttributes.Ext" "rasEntry\032-\n\013ExtrasEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005" - "value\030\002 \001(\014:\0028\001\"\356\003\n\020HeartbeatRequest\022\022\n\n" + "value\030\002 \001(\014:\0028\001\"\357\003\n\020HeartbeatRequest\022\022\n\n" "request_id\030\001 \001(\014\022\024\n\014sequence_num\030\002 \001(\004\022\024" "\n\014capabilities\030\003 \001(\004\022\023\n\013instance_id\030\004 \001(" "\014\022\022\n\nagent_type\030\005 \001(\t\022:\n\nattributes\030\006 \001(" @@ -495,51 +495,50 @@ void AddDescriptorsImpl() { "AgentGroupTag\022\026\n\016running_status\030\010 \001(\t\022\024\n" "\014startup_time\030\t \001(\003\022;\n\020pipeline_configs\030" "\n \003(\0132!.configserver.proto.v2.ConfigInfo" - "\022:\n\017process_configs\030\013 \003(\0132!.configserver" - ".proto.v2.ConfigInfo\022;\n\017custom_commands\030" - "\014 \003(\0132\".configserver.proto.v2.CommandInf" - "o\022\r\n\005flags\030\r \001(\004\022\016\n\006opaque\030\016 \001(\014\"=\n\014Conf" - "igDetail\022\014\n\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(\003\022" - "\016\n\006detail\030\003 \001(\014\"P\n\rCommandDetail\022\014\n\004type" - "\030\001 \001(\t\022\014\n\004name\030\002 
\001(\t\022\016\n\006detail\030\003 \001(\014\022\023\n\013" - "expire_time\030\004 \001(\003\"@\n\023ServerErrorResponse" - "\022\022\n\nerror_code\030\001 \001(\005\022\025\n\rerror_message\030\002 " - "\001(\t\"\361\002\n\021HeartbeatResponse\022\022\n\nrequest_id\030" - "\001 \001(\014\022B\n\016error_response\030\002 \001(\0132*.configse" - "rver.proto.v2.ServerErrorResponse\022\024\n\014cap" - "abilities\030\003 \001(\004\022D\n\027pipeline_config_updat" - "es\030\004 \003(\0132#.configserver.proto.v2.ConfigD" - "etail\022C\n\026process_config_updates\030\005 \003(\0132#." - "configserver.proto.v2.ConfigDetail\022D\n\026cu" - "stom_command_updates\030\006 \003(\0132$.configserve" - "r.proto.v2.CommandDetail\022\r\n\005flags\030\007 \001(\004\022" - "\016\n\006opaque\030\010 \001(\014\"u\n\022FetchConfigRequest\022\022\n" - "\nrequest_id\030\001 \001(\014\022\023\n\013instance_id\030\002 \001(\014\0226" - "\n\013req_configs\030\003 \003(\0132!.configserver.proto" - ".v2.ConfigInfo\"\252\001\n\023FetchConfigResponse\022\022" - "\n\nrequest_id\030\001 \001(\014\022B\n\016error_response\030\002 \001" - "(\0132*.configserver.proto.v2.ServerErrorRe" - "sponse\022;\n\016config_details\030\003 \003(\0132#.configs" - "erver.proto.v2.ConfigDetail*@\n\014ConfigSta" - "tus\022\t\n\005UNSET\020\000\022\014\n\010APPLYING\020\001\022\013\n\007APPLIED\020" - "\002\022\n\n\006FAILED\020\003*\202\001\n\021AgentCapabilities\022\036\n\032U" - "nspecifiedAgentCapability\020\000\022\031\n\025AcceptsPi" - "pelineConfig\020\001\022\030\n\024AcceptsProcessConfig\020\002" - "\022\030\n\024AcceptsCustomCommand\020\004*:\n\014RequestFla" - "gs\022\033\n\027RequestFlagsUnspecified\020\000\022\r\n\tFullS" - "tate\020\001*\254\001\n\022ServerCapabilities\022\037\n\033Unspeci" - "fiedServerCapability\020\000\022\024\n\020RembersAttribu" - "te\020\001\022\037\n\033RembersPipelineConfigStatus\020\002\022\036\n" - "\032RembersProcessConfigStatus\020\004\022\036\n\032Rembers" - 
"CustomCommandStatus\020\010*\177\n\rResponseFlags\022\034" - "\n\030ResponseFlagsUnspecified\020\000\022\023\n\017ReportFu" - "llState\020\001\022\035\n\031FetchPipelineConfigDetail\020\002" - "\022\034\n\030FetchProcessConfigDetail\020\004B6Z4/confi" - "g_server/service/protov2;configserver_pr" - "oto_v2b\006proto3" + "\022;\n\020instance_configs\030\013 \003(\0132!.configserve" + "r.proto.v2.ConfigInfo\022;\n\017custom_commands" + "\030\014 \003(\0132\".configserver.proto.v2.CommandIn" + "fo\022\r\n\005flags\030\r \001(\004\022\016\n\006opaque\030\016 \001(\014\"=\n\014Con" + "figDetail\022\014\n\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(\003" + "\022\016\n\006detail\030\003 \001(\014\"P\n\rCommandDetail\022\014\n\004typ" + "e\030\001 \001(\t\022\014\n\004name\030\002 \001(\t\022\016\n\006detail\030\003 \001(\014\022\023\n" + "\013expire_time\030\004 \001(\003\"\355\002\n\021HeartbeatResponse" + "\022\022\n\nrequest_id\030\001 \001(\014\022=\n\016commonResponse\030\002" + " \001(\0132%.configserver.proto.v2.CommonRespo" + "nse\022\024\n\014capabilities\030\003 \001(\004\022D\n\027pipeline_co" + "nfig_updates\030\004 \003(\0132#.configserver.proto." + "v2.ConfigDetail\022D\n\027instance_config_updat" + "es\030\005 \003(\0132#.configserver.proto.v2.ConfigD" + "etail\022D\n\026custom_command_updates\030\006 \003(\0132$." 
+ "configserver.proto.v2.CommandDetail\022\r\n\005f" + "lags\030\007 \001(\004\022\016\n\006opaque\030\010 \001(\014\"u\n\022FetchConfi" + "gRequest\022\022\n\nrequest_id\030\001 \001(\014\022\023\n\013instance" + "_id\030\002 \001(\014\0226\n\013req_configs\030\003 \003(\0132!.configs" + "erver.proto.v2.ConfigInfo\"\245\001\n\023FetchConfi" + "gResponse\022\022\n\nrequest_id\030\001 \001(\014\022=\n\016commonR" + "esponse\030\002 \001(\0132%.configserver.proto.v2.Co" + "mmonResponse\022;\n\016config_details\030\003 \003(\0132#.c" + "onfigserver.proto.v2.ConfigDetail\"6\n\016Com" + "monResponse\022\016\n\006status\030\001 \001(\005\022\024\n\014errorMess" + "age\030\002 \001(\014*@\n\014ConfigStatus\022\t\n\005UNSET\020\000\022\014\n\010" + "APPLYING\020\001\022\013\n\007APPLIED\020\002\022\n\n\006FAILED\020\003*\203\001\n\021" + "AgentCapabilities\022\036\n\032UnspecifiedAgentCap" + "ability\020\000\022\031\n\025AcceptsPipelineConfig\020\001\022\031\n\025" + "AcceptsInstanceConfig\020\002\022\030\n\024AcceptsCustom" + "Command\020\004*:\n\014RequestFlags\022\033\n\027RequestFlag" + "sUnspecified\020\000\022\r\n\tFullState\020\001*\255\001\n\022Server" + "Capabilities\022\037\n\033UnspecifiedServerCapabil" + "ity\020\000\022\024\n\020RembersAttribute\020\001\022\037\n\033RembersPi" + "pelineConfigStatus\020\002\022\037\n\033RembersInstanceC" + "onfigStatus\020\004\022\036\n\032RembersCustomCommandSta" + "tus\020\010*\200\001\n\rResponseFlags\022\034\n\030ResponseFlags" + "Unspecified\020\000\022\023\n\017ReportFullState\020\001\022\035\n\031Fe" + "tchPipelineConfigDetail\020\002\022\035\n\031FetchInstan" + "ceConfigDetail\020\004B6Z4/config_server/servi" + "ce/protov2;configserver_proto_v2b\006proto3" }; ::google::protobuf::DescriptorPool::InternalAddGeneratedFile( - descriptor, 2494); + descriptor, 2480); ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile( "v2/agent.proto", &protobuf_RegisterTypes); } @@ -2220,7 +2219,7 @@ const int 
HeartbeatRequest::kTagsFieldNumber; const int HeartbeatRequest::kRunningStatusFieldNumber; const int HeartbeatRequest::kStartupTimeFieldNumber; const int HeartbeatRequest::kPipelineConfigsFieldNumber; -const int HeartbeatRequest::kProcessConfigsFieldNumber; +const int HeartbeatRequest::kInstanceConfigsFieldNumber; const int HeartbeatRequest::kCustomCommandsFieldNumber; const int HeartbeatRequest::kFlagsFieldNumber; const int HeartbeatRequest::kOpaqueFieldNumber; @@ -2238,7 +2237,7 @@ HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& from) _internal_metadata_(NULL), tags_(from.tags_), pipeline_configs_(from.pipeline_configs_), - process_configs_(from.process_configs_), + instance_configs_(from.instance_configs_), custom_commands_(from.custom_commands_) { _internal_metadata_.MergeFrom(from._internal_metadata_); request_id_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); @@ -2319,7 +2318,7 @@ void HeartbeatRequest::Clear() { tags_.Clear(); pipeline_configs_.Clear(); - process_configs_.Clear(); + instance_configs_.Clear(); custom_commands_.Clear(); request_id_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); instance_id_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); @@ -2480,12 +2479,12 @@ bool HeartbeatRequest::MergePartialFromCodedStream( break; } - // repeated .configserver.proto.v2.ConfigInfo process_configs = 11; + // repeated .configserver.proto.v2.ConfigInfo instance_configs = 11; case 11: { if (static_cast< ::google::protobuf::uint8>(tag) == static_cast< ::google::protobuf::uint8>(90u /* 90 & 0xFF */)) { DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( - input, add_process_configs())); + input, add_instance_configs())); } else { goto handle_unusual; } @@ -2627,12 +2626,12 @@ void HeartbeatRequest::SerializeWithCachedSizes( output); } - // repeated .configserver.proto.v2.ConfigInfo process_configs = 11; + // repeated 
.configserver.proto.v2.ConfigInfo instance_configs = 11; for (unsigned int i = 0, - n = static_cast(this->process_configs_size()); i < n; i++) { + n = static_cast(this->instance_configs_size()); i < n; i++) { ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( 11, - this->process_configs(static_cast(i)), + this->instance_configs(static_cast(i)), output); } @@ -2744,12 +2743,12 @@ ::google::protobuf::uint8* HeartbeatRequest::InternalSerializeWithCachedSizesToA 10, this->pipeline_configs(static_cast(i)), deterministic, target); } - // repeated .configserver.proto.v2.ConfigInfo process_configs = 11; + // repeated .configserver.proto.v2.ConfigInfo instance_configs = 11; for (unsigned int i = 0, - n = static_cast(this->process_configs_size()); i < n; i++) { + n = static_cast(this->instance_configs_size()); i < n; i++) { target = ::google::protobuf::internal::WireFormatLite:: InternalWriteMessageToArray( - 11, this->process_configs(static_cast(i)), deterministic, target); + 11, this->instance_configs(static_cast(i)), deterministic, target); } // repeated .configserver.proto.v2.CommandInfo custom_commands = 12; @@ -2811,14 +2810,14 @@ size_t HeartbeatRequest::ByteSizeLong() const { } } - // repeated .configserver.proto.v2.ConfigInfo process_configs = 11; + // repeated .configserver.proto.v2.ConfigInfo instance_configs = 11; { - unsigned int count = static_cast(this->process_configs_size()); + unsigned int count = static_cast(this->instance_configs_size()); total_size += 1UL * count; for (unsigned int i = 0; i < count; i++) { total_size += ::google::protobuf::internal::WireFormatLite::MessageSize( - this->process_configs(static_cast(i))); + this->instance_configs(static_cast(i))); } } @@ -2932,7 +2931,7 @@ void HeartbeatRequest::MergeFrom(const HeartbeatRequest& from) { tags_.MergeFrom(from.tags_); pipeline_configs_.MergeFrom(from.pipeline_configs_); - process_configs_.MergeFrom(from.process_configs_); + 
instance_configs_.MergeFrom(from.instance_configs_); custom_commands_.MergeFrom(from.custom_commands_); if (from.request_id().size() > 0) { @@ -2997,7 +2996,7 @@ void HeartbeatRequest::InternalSwap(HeartbeatRequest* other) { using std::swap; CastToBase(&tags_)->InternalSwap(CastToBase(&other->tags_)); CastToBase(&pipeline_configs_)->InternalSwap(CastToBase(&other->pipeline_configs_)); - CastToBase(&process_configs_)->InternalSwap(CastToBase(&other->process_configs_)); + CastToBase(&instance_configs_)->InternalSwap(CastToBase(&other->instance_configs_)); CastToBase(&custom_commands_)->InternalSwap(CastToBase(&other->custom_commands_)); request_id_.Swap(&other->request_id_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); @@ -3735,299 +3734,18 @@ ::google::protobuf::Metadata CommandDetail::GetMetadata() const { } -// =================================================================== - -void ServerErrorResponse::InitAsDefaultInstance() { -} -#if !defined(_MSC_VER) || _MSC_VER >= 1900 -const int ServerErrorResponse::kErrorCodeFieldNumber; -const int ServerErrorResponse::kErrorMessageFieldNumber; -#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 - -ServerErrorResponse::ServerErrorResponse() - : ::google::protobuf::Message(), _internal_metadata_(NULL) { - ::google::protobuf::internal::InitSCC( - &protobuf_v2_2fagent_2eproto::scc_info_ServerErrorResponse.base); - SharedCtor(); - // @@protoc_insertion_point(constructor:configserver.proto.v2.ServerErrorResponse) -} -ServerErrorResponse::ServerErrorResponse(const ServerErrorResponse& from) - : ::google::protobuf::Message(), - _internal_metadata_(NULL) { - _internal_metadata_.MergeFrom(from._internal_metadata_); - error_message_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (from.error_message().size() > 0) { - error_message_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.error_message_); - } - error_code_ = 
from.error_code_; - // @@protoc_insertion_point(copy_constructor:configserver.proto.v2.ServerErrorResponse) -} - -void ServerErrorResponse::SharedCtor() { - error_message_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - error_code_ = 0; -} - -ServerErrorResponse::~ServerErrorResponse() { - // @@protoc_insertion_point(destructor:configserver.proto.v2.ServerErrorResponse) - SharedDtor(); -} - -void ServerErrorResponse::SharedDtor() { - error_message_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} - -void ServerErrorResponse::SetCachedSize(int size) const { - _cached_size_.Set(size); -} -const ::google::protobuf::Descriptor* ServerErrorResponse::descriptor() { - ::protobuf_v2_2fagent_2eproto::protobuf_AssignDescriptorsOnce(); - return ::protobuf_v2_2fagent_2eproto::file_level_metadata[kIndexInFileMessages].descriptor; -} - -const ServerErrorResponse& ServerErrorResponse::default_instance() { - ::google::protobuf::internal::InitSCC(&protobuf_v2_2fagent_2eproto::scc_info_ServerErrorResponse.base); - return *internal_default_instance(); -} - - -void ServerErrorResponse::Clear() { -// @@protoc_insertion_point(message_clear_start:configserver.proto.v2.ServerErrorResponse) - ::google::protobuf::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - error_message_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - error_code_ = 0; - _internal_metadata_.Clear(); -} - -bool ServerErrorResponse::MergePartialFromCodedStream( - ::google::protobuf::io::CodedInputStream* input) { -#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure - ::google::protobuf::uint32 tag; - // @@protoc_insertion_point(parse_start:configserver.proto.v2.ServerErrorResponse) - for (;;) { - ::std::pair<::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); - tag = p.first; - if (!p.second) goto 
handle_unusual; - switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { - // int32 error_code = 1; - case 1: { - if (static_cast< ::google::protobuf::uint8>(tag) == - static_cast< ::google::protobuf::uint8>(8u /* 8 & 0xFF */)) { - - DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< - ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( - input, &error_code_))); - } else { - goto handle_unusual; - } - break; - } - - // string error_message = 2; - case 2: { - if (static_cast< ::google::protobuf::uint8>(tag) == - static_cast< ::google::protobuf::uint8>(18u /* 18 & 0xFF */)) { - DO_(::google::protobuf::internal::WireFormatLite::ReadString( - input, this->mutable_error_message())); - DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->error_message().data(), static_cast(this->error_message().length()), - ::google::protobuf::internal::WireFormatLite::PARSE, - "configserver.proto.v2.ServerErrorResponse.error_message")); - } else { - goto handle_unusual; - } - break; - } - - default: { - handle_unusual: - if (tag == 0) { - goto success; - } - DO_(::google::protobuf::internal::WireFormat::SkipField( - input, tag, _internal_metadata_.mutable_unknown_fields())); - break; - } - } - } -success: - // @@protoc_insertion_point(parse_success:configserver.proto.v2.ServerErrorResponse) - return true; -failure: - // @@protoc_insertion_point(parse_failure:configserver.proto.v2.ServerErrorResponse) - return false; -#undef DO_ -} - -void ServerErrorResponse::SerializeWithCachedSizes( - ::google::protobuf::io::CodedOutputStream* output) const { - // @@protoc_insertion_point(serialize_start:configserver.proto.v2.ServerErrorResponse) - ::google::protobuf::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // int32 error_code = 1; - if (this->error_code() != 0) { - ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->error_code(), output); - } - - // string error_message = 
2; - if (this->error_message().size() > 0) { - ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->error_message().data(), static_cast(this->error_message().length()), - ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "configserver.proto.v2.ServerErrorResponse.error_message"); - ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( - 2, this->error_message(), output); - } - - if ((_internal_metadata_.have_unknown_fields() && ::google::protobuf::internal::GetProto3PreserveUnknownsDefault())) { - ::google::protobuf::internal::WireFormat::SerializeUnknownFields( - (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()), output); - } - // @@protoc_insertion_point(serialize_end:configserver.proto.v2.ServerErrorResponse) -} - -::google::protobuf::uint8* ServerErrorResponse::InternalSerializeWithCachedSizesToArray( - bool deterministic, ::google::protobuf::uint8* target) const { - (void)deterministic; // Unused - // @@protoc_insertion_point(serialize_to_array_start:configserver.proto.v2.ServerErrorResponse) - ::google::protobuf::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // int32 error_code = 1; - if (this->error_code() != 0) { - target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->error_code(), target); - } - - // string error_message = 2; - if (this->error_message().size() > 0) { - ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( - this->error_message().data(), static_cast(this->error_message().length()), - ::google::protobuf::internal::WireFormatLite::SERIALIZE, - "configserver.proto.v2.ServerErrorResponse.error_message"); - target = - ::google::protobuf::internal::WireFormatLite::WriteStringToArray( - 2, this->error_message(), target); - } - - if ((_internal_metadata_.have_unknown_fields() && ::google::protobuf::internal::GetProto3PreserveUnknownsDefault())) { - target = 
::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( - (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()), target); - } - // @@protoc_insertion_point(serialize_to_array_end:configserver.proto.v2.ServerErrorResponse) - return target; -} - -size_t ServerErrorResponse::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:configserver.proto.v2.ServerErrorResponse) - size_t total_size = 0; - - if ((_internal_metadata_.have_unknown_fields() && ::google::protobuf::internal::GetProto3PreserveUnknownsDefault())) { - total_size += - ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( - (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance())); - } - // string error_message = 2; - if (this->error_message().size() > 0) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::StringSize( - this->error_message()); - } - - // int32 error_code = 1; - if (this->error_code() != 0) { - total_size += 1 + - ::google::protobuf::internal::WireFormatLite::Int32Size( - this->error_code()); - } - - int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); - SetCachedSize(cached_size); - return total_size; -} - -void ServerErrorResponse::MergeFrom(const ::google::protobuf::Message& from) { -// @@protoc_insertion_point(generalized_merge_from_start:configserver.proto.v2.ServerErrorResponse) - GOOGLE_DCHECK_NE(&from, this); - const ServerErrorResponse* source = - ::google::protobuf::internal::DynamicCastToGenerated( - &from); - if (source == NULL) { - // @@protoc_insertion_point(generalized_merge_from_cast_fail:configserver.proto.v2.ServerErrorResponse) - ::google::protobuf::internal::ReflectionOps::Merge(from, this); - } else { - // 
@@protoc_insertion_point(generalized_merge_from_cast_success:configserver.proto.v2.ServerErrorResponse) - MergeFrom(*source); - } -} - -void ServerErrorResponse::MergeFrom(const ServerErrorResponse& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:configserver.proto.v2.ServerErrorResponse) - GOOGLE_DCHECK_NE(&from, this); - _internal_metadata_.MergeFrom(from._internal_metadata_); - ::google::protobuf::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - if (from.error_message().size() > 0) { - - error_message_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.error_message_); - } - if (from.error_code() != 0) { - set_error_code(from.error_code()); - } -} - -void ServerErrorResponse::CopyFrom(const ::google::protobuf::Message& from) { -// @@protoc_insertion_point(generalized_copy_from_start:configserver.proto.v2.ServerErrorResponse) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -void ServerErrorResponse::CopyFrom(const ServerErrorResponse& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:configserver.proto.v2.ServerErrorResponse) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool ServerErrorResponse::IsInitialized() const { - return true; -} - -void ServerErrorResponse::Swap(ServerErrorResponse* other) { - if (other == this) return; - InternalSwap(other); -} -void ServerErrorResponse::InternalSwap(ServerErrorResponse* other) { - using std::swap; - error_message_.Swap(&other->error_message_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), - GetArenaNoVirtual()); - swap(error_code_, other->error_code_); - _internal_metadata_.Swap(&other->_internal_metadata_); -} - -::google::protobuf::Metadata ServerErrorResponse::GetMetadata() const { - protobuf_v2_2fagent_2eproto::protobuf_AssignDescriptorsOnce(); - return ::protobuf_v2_2fagent_2eproto::file_level_metadata[kIndexInFileMessages]; -} - - // 
=================================================================== void HeartbeatResponse::InitAsDefaultInstance() { - ::configserver::proto::v2::_HeartbeatResponse_default_instance_._instance.get_mutable()->error_response_ = const_cast< ::configserver::proto::v2::ServerErrorResponse*>( - ::configserver::proto::v2::ServerErrorResponse::internal_default_instance()); + ::configserver::proto::v2::_HeartbeatResponse_default_instance_._instance.get_mutable()->commonresponse_ = const_cast< ::configserver::proto::v2::CommonResponse*>( + ::configserver::proto::v2::CommonResponse::internal_default_instance()); } #if !defined(_MSC_VER) || _MSC_VER >= 1900 const int HeartbeatResponse::kRequestIdFieldNumber; -const int HeartbeatResponse::kErrorResponseFieldNumber; +const int HeartbeatResponse::kCommonResponseFieldNumber; const int HeartbeatResponse::kCapabilitiesFieldNumber; const int HeartbeatResponse::kPipelineConfigUpdatesFieldNumber; -const int HeartbeatResponse::kProcessConfigUpdatesFieldNumber; +const int HeartbeatResponse::kInstanceConfigUpdatesFieldNumber; const int HeartbeatResponse::kCustomCommandUpdatesFieldNumber; const int HeartbeatResponse::kFlagsFieldNumber; const int HeartbeatResponse::kOpaqueFieldNumber; @@ -4044,7 +3762,7 @@ HeartbeatResponse::HeartbeatResponse(const HeartbeatResponse& from) : ::google::protobuf::Message(), _internal_metadata_(NULL), pipeline_config_updates_(from.pipeline_config_updates_), - process_config_updates_(from.process_config_updates_), + instance_config_updates_(from.instance_config_updates_), custom_command_updates_(from.custom_command_updates_) { _internal_metadata_.MergeFrom(from._internal_metadata_); request_id_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); @@ -4055,10 +3773,10 @@ HeartbeatResponse::HeartbeatResponse(const HeartbeatResponse& from) if (from.opaque().size() > 0) { opaque_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.opaque_); } - if 
(from.has_error_response()) { - error_response_ = new ::configserver::proto::v2::ServerErrorResponse(*from.error_response_); + if (from.has_commonresponse()) { + commonresponse_ = new ::configserver::proto::v2::CommonResponse(*from.commonresponse_); } else { - error_response_ = NULL; + commonresponse_ = NULL; } ::memcpy(&capabilities_, &from.capabilities_, static_cast(reinterpret_cast(&flags_) - @@ -4069,9 +3787,9 @@ HeartbeatResponse::HeartbeatResponse(const HeartbeatResponse& from) void HeartbeatResponse::SharedCtor() { request_id_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); opaque_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - ::memset(&error_response_, 0, static_cast( + ::memset(&commonresponse_, 0, static_cast( reinterpret_cast(&flags_) - - reinterpret_cast(&error_response_)) + sizeof(flags_)); + reinterpret_cast(&commonresponse_)) + sizeof(flags_)); } HeartbeatResponse::~HeartbeatResponse() { @@ -4082,7 +3800,7 @@ HeartbeatResponse::~HeartbeatResponse() { void HeartbeatResponse::SharedDtor() { request_id_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); opaque_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (this != internal_default_instance()) delete error_response_; + if (this != internal_default_instance()) delete commonresponse_; } void HeartbeatResponse::SetCachedSize(int size) const { @@ -4106,14 +3824,14 @@ void HeartbeatResponse::Clear() { (void) cached_has_bits; pipeline_config_updates_.Clear(); - process_config_updates_.Clear(); + instance_config_updates_.Clear(); custom_command_updates_.Clear(); request_id_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); opaque_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (GetArenaNoVirtual() == NULL && error_response_ != NULL) { - delete error_response_; + if (GetArenaNoVirtual() == NULL && 
commonresponse_ != NULL) { + delete commonresponse_; } - error_response_ = NULL; + commonresponse_ = NULL; ::memset(&capabilities_, 0, static_cast( reinterpret_cast(&flags_) - reinterpret_cast(&capabilities_)) + sizeof(flags_)); @@ -4142,12 +3860,12 @@ bool HeartbeatResponse::MergePartialFromCodedStream( break; } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; + // .configserver.proto.v2.CommonResponse commonResponse = 2; case 2: { if (static_cast< ::google::protobuf::uint8>(tag) == static_cast< ::google::protobuf::uint8>(18u /* 18 & 0xFF */)) { DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( - input, mutable_error_response())); + input, mutable_commonresponse())); } else { goto handle_unusual; } @@ -4180,12 +3898,12 @@ bool HeartbeatResponse::MergePartialFromCodedStream( break; } - // repeated .configserver.proto.v2.ConfigDetail process_config_updates = 5; + // repeated .configserver.proto.v2.ConfigDetail instance_config_updates = 5; case 5: { if (static_cast< ::google::protobuf::uint8>(tag) == static_cast< ::google::protobuf::uint8>(42u /* 42 & 0xFF */)) { DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( - input, add_process_config_updates())); + input, add_instance_config_updates())); } else { goto handle_unusual; } @@ -4262,10 +3980,10 @@ void HeartbeatResponse::SerializeWithCachedSizes( 1, this->request_id(), output); } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - if (this->has_error_response()) { + // .configserver.proto.v2.CommonResponse commonResponse = 2; + if (this->has_commonresponse()) { ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( - 2, this->_internal_error_response(), output); + 2, this->_internal_commonresponse(), output); } // uint64 capabilities = 3; @@ -4282,12 +4000,12 @@ void HeartbeatResponse::SerializeWithCachedSizes( output); } - // repeated .configserver.proto.v2.ConfigDetail process_config_updates = 5; + // repeated 
.configserver.proto.v2.ConfigDetail instance_config_updates = 5; for (unsigned int i = 0, - n = static_cast(this->process_config_updates_size()); i < n; i++) { + n = static_cast(this->instance_config_updates_size()); i < n; i++) { ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( 5, - this->process_config_updates(static_cast(i)), + this->instance_config_updates(static_cast(i)), output); } @@ -4332,11 +4050,11 @@ ::google::protobuf::uint8* HeartbeatResponse::InternalSerializeWithCachedSizesTo 1, this->request_id(), target); } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - if (this->has_error_response()) { + // .configserver.proto.v2.CommonResponse commonResponse = 2; + if (this->has_commonresponse()) { target = ::google::protobuf::internal::WireFormatLite:: InternalWriteMessageToArray( - 2, this->_internal_error_response(), deterministic, target); + 2, this->_internal_commonresponse(), deterministic, target); } // uint64 capabilities = 3; @@ -4352,12 +4070,12 @@ ::google::protobuf::uint8* HeartbeatResponse::InternalSerializeWithCachedSizesTo 4, this->pipeline_config_updates(static_cast(i)), deterministic, target); } - // repeated .configserver.proto.v2.ConfigDetail process_config_updates = 5; + // repeated .configserver.proto.v2.ConfigDetail instance_config_updates = 5; for (unsigned int i = 0, - n = static_cast(this->process_config_updates_size()); i < n; i++) { + n = static_cast(this->instance_config_updates_size()); i < n; i++) { target = ::google::protobuf::internal::WireFormatLite:: InternalWriteMessageToArray( - 5, this->process_config_updates(static_cast(i)), deterministic, target); + 5, this->instance_config_updates(static_cast(i)), deterministic, target); } // repeated .configserver.proto.v2.CommandDetail custom_command_updates = 6; @@ -4408,14 +4126,14 @@ size_t HeartbeatResponse::ByteSizeLong() const { } } - // repeated .configserver.proto.v2.ConfigDetail process_config_updates = 5; + // repeated 
.configserver.proto.v2.ConfigDetail instance_config_updates = 5; { - unsigned int count = static_cast(this->process_config_updates_size()); + unsigned int count = static_cast(this->instance_config_updates_size()); total_size += 1UL * count; for (unsigned int i = 0; i < count; i++) { total_size += ::google::protobuf::internal::WireFormatLite::MessageSize( - this->process_config_updates(static_cast(i))); + this->instance_config_updates(static_cast(i))); } } @@ -4444,11 +4162,11 @@ size_t HeartbeatResponse::ByteSizeLong() const { this->opaque()); } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - if (this->has_error_response()) { + // .configserver.proto.v2.CommonResponse commonResponse = 2; + if (this->has_commonresponse()) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::MessageSize( - *error_response_); + *commonresponse_); } // uint64 capabilities = 3; @@ -4493,7 +4211,7 @@ void HeartbeatResponse::MergeFrom(const HeartbeatResponse& from) { (void) cached_has_bits; pipeline_config_updates_.MergeFrom(from.pipeline_config_updates_); - process_config_updates_.MergeFrom(from.process_config_updates_); + instance_config_updates_.MergeFrom(from.instance_config_updates_); custom_command_updates_.MergeFrom(from.custom_command_updates_); if (from.request_id().size() > 0) { @@ -4503,8 +4221,8 @@ void HeartbeatResponse::MergeFrom(const HeartbeatResponse& from) { opaque_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.opaque_); } - if (from.has_error_response()) { - mutable_error_response()->::configserver::proto::v2::ServerErrorResponse::MergeFrom(from.error_response()); + if (from.has_commonresponse()) { + mutable_commonresponse()->::configserver::proto::v2::CommonResponse::MergeFrom(from.commonresponse()); } if (from.capabilities() != 0) { set_capabilities(from.capabilities()); @@ -4539,13 +4257,13 @@ void HeartbeatResponse::Swap(HeartbeatResponse* other) { void 
HeartbeatResponse::InternalSwap(HeartbeatResponse* other) { using std::swap; CastToBase(&pipeline_config_updates_)->InternalSwap(CastToBase(&other->pipeline_config_updates_)); - CastToBase(&process_config_updates_)->InternalSwap(CastToBase(&other->process_config_updates_)); + CastToBase(&instance_config_updates_)->InternalSwap(CastToBase(&other->instance_config_updates_)); CastToBase(&custom_command_updates_)->InternalSwap(CastToBase(&other->custom_command_updates_)); request_id_.Swap(&other->request_id_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); opaque_.Swap(&other->opaque_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); - swap(error_response_, other->error_response_); + swap(commonresponse_, other->commonresponse_); swap(capabilities_, other->capabilities_); swap(flags_, other->flags_); _internal_metadata_.Swap(&other->_internal_metadata_); @@ -4881,12 +4599,12 @@ ::google::protobuf::Metadata FetchConfigRequest::GetMetadata() const { // =================================================================== void FetchConfigResponse::InitAsDefaultInstance() { - ::configserver::proto::v2::_FetchConfigResponse_default_instance_._instance.get_mutable()->error_response_ = const_cast< ::configserver::proto::v2::ServerErrorResponse*>( - ::configserver::proto::v2::ServerErrorResponse::internal_default_instance()); + ::configserver::proto::v2::_FetchConfigResponse_default_instance_._instance.get_mutable()->commonresponse_ = const_cast< ::configserver::proto::v2::CommonResponse*>( + ::configserver::proto::v2::CommonResponse::internal_default_instance()); } #if !defined(_MSC_VER) || _MSC_VER >= 1900 const int FetchConfigResponse::kRequestIdFieldNumber; -const int FetchConfigResponse::kErrorResponseFieldNumber; +const int FetchConfigResponse::kCommonResponseFieldNumber; const int FetchConfigResponse::kConfigDetailsFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 @@ -4906,17 +4624,17 @@ 
FetchConfigResponse::FetchConfigResponse(const FetchConfigResponse& from) if (from.request_id().size() > 0) { request_id_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.request_id_); } - if (from.has_error_response()) { - error_response_ = new ::configserver::proto::v2::ServerErrorResponse(*from.error_response_); + if (from.has_commonresponse()) { + commonresponse_ = new ::configserver::proto::v2::CommonResponse(*from.commonresponse_); } else { - error_response_ = NULL; + commonresponse_ = NULL; } // @@protoc_insertion_point(copy_constructor:configserver.proto.v2.FetchConfigResponse) } void FetchConfigResponse::SharedCtor() { request_id_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - error_response_ = NULL; + commonresponse_ = NULL; } FetchConfigResponse::~FetchConfigResponse() { @@ -4926,7 +4644,7 @@ FetchConfigResponse::~FetchConfigResponse() { void FetchConfigResponse::SharedDtor() { request_id_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (this != internal_default_instance()) delete error_response_; + if (this != internal_default_instance()) delete commonresponse_; } void FetchConfigResponse::SetCachedSize(int size) const { @@ -4951,10 +4669,10 @@ void FetchConfigResponse::Clear() { config_details_.Clear(); request_id_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); - if (GetArenaNoVirtual() == NULL && error_response_ != NULL) { - delete error_response_; + if (GetArenaNoVirtual() == NULL && commonresponse_ != NULL) { + delete commonresponse_; } - error_response_ = NULL; + commonresponse_ = NULL; _internal_metadata_.Clear(); } @@ -4980,12 +4698,12 @@ bool FetchConfigResponse::MergePartialFromCodedStream( break; } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; + // .configserver.proto.v2.CommonResponse commonResponse = 2; case 2: { if (static_cast< ::google::protobuf::uint8>(tag) == static_cast< 
::google::protobuf::uint8>(18u /* 18 & 0xFF */)) { DO_(::google::protobuf::internal::WireFormatLite::ReadMessage( - input, mutable_error_response())); + input, mutable_commonresponse())); } else { goto handle_unusual; } @@ -5036,10 +4754,10 @@ void FetchConfigResponse::SerializeWithCachedSizes( 1, this->request_id(), output); } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - if (this->has_error_response()) { + // .configserver.proto.v2.CommonResponse commonResponse = 2; + if (this->has_commonresponse()) { ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( - 2, this->_internal_error_response(), output); + 2, this->_internal_commonresponse(), output); } // repeated .configserver.proto.v2.ConfigDetail config_details = 3; @@ -5072,11 +4790,11 @@ ::google::protobuf::uint8* FetchConfigResponse::InternalSerializeWithCachedSizes 1, this->request_id(), target); } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - if (this->has_error_response()) { + // .configserver.proto.v2.CommonResponse commonResponse = 2; + if (this->has_commonresponse()) { target = ::google::protobuf::internal::WireFormatLite:: InternalWriteMessageToArray( - 2, this->_internal_error_response(), deterministic, target); + 2, this->_internal_commonresponse(), deterministic, target); } // repeated .configserver.proto.v2.ConfigDetail config_details = 3; @@ -5122,11 +4840,11 @@ size_t FetchConfigResponse::ByteSizeLong() const { this->request_id()); } - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - if (this->has_error_response()) { + // .configserver.proto.v2.CommonResponse commonResponse = 2; + if (this->has_commonresponse()) { total_size += 1 + ::google::protobuf::internal::WireFormatLite::MessageSize( - *error_response_); + *commonresponse_); } int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); @@ -5161,8 +4879,8 @@ void FetchConfigResponse::MergeFrom(const FetchConfigResponse& from) { 
request_id_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.request_id_); } - if (from.has_error_response()) { - mutable_error_response()->::configserver::proto::v2::ServerErrorResponse::MergeFrom(from.error_response()); + if (from.has_commonresponse()) { + mutable_commonresponse()->::configserver::proto::v2::CommonResponse::MergeFrom(from.commonresponse()); } } @@ -5193,7 +4911,7 @@ void FetchConfigResponse::InternalSwap(FetchConfigResponse* other) { CastToBase(&config_details_)->InternalSwap(CastToBase(&other->config_details_)); request_id_.Swap(&other->request_id_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); - swap(error_response_, other->error_response_); + swap(commonresponse_, other->commonresponse_); _internal_metadata_.Swap(&other->_internal_metadata_); } @@ -5203,6 +4921,275 @@ ::google::protobuf::Metadata FetchConfigResponse::GetMetadata() const { } +// =================================================================== + +void CommonResponse::InitAsDefaultInstance() { +} +#if !defined(_MSC_VER) || _MSC_VER >= 1900 +const int CommonResponse::kStatusFieldNumber; +const int CommonResponse::kErrorMessageFieldNumber; +#endif // !defined(_MSC_VER) || _MSC_VER >= 1900 + +CommonResponse::CommonResponse() + : ::google::protobuf::Message(), _internal_metadata_(NULL) { + ::google::protobuf::internal::InitSCC( + &protobuf_v2_2fagent_2eproto::scc_info_CommonResponse.base); + SharedCtor(); + // @@protoc_insertion_point(constructor:configserver.proto.v2.CommonResponse) +} +CommonResponse::CommonResponse(const CommonResponse& from) + : ::google::protobuf::Message(), + _internal_metadata_(NULL) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + errormessage_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (from.errormessage().size() > 0) { + errormessage_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), 
from.errormessage_); + } + status_ = from.status_; + // @@protoc_insertion_point(copy_constructor:configserver.proto.v2.CommonResponse) +} + +void CommonResponse::SharedCtor() { + errormessage_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + status_ = 0; +} + +CommonResponse::~CommonResponse() { + // @@protoc_insertion_point(destructor:configserver.proto.v2.CommonResponse) + SharedDtor(); +} + +void CommonResponse::SharedDtor() { + errormessage_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} + +void CommonResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const ::google::protobuf::Descriptor* CommonResponse::descriptor() { + ::protobuf_v2_2fagent_2eproto::protobuf_AssignDescriptorsOnce(); + return ::protobuf_v2_2fagent_2eproto::file_level_metadata[kIndexInFileMessages].descriptor; +} + +const CommonResponse& CommonResponse::default_instance() { + ::google::protobuf::internal::InitSCC(&protobuf_v2_2fagent_2eproto::scc_info_CommonResponse.base); + return *internal_default_instance(); +} + + +void CommonResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:configserver.proto.v2.CommonResponse) + ::google::protobuf::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + errormessage_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + status_ = 0; + _internal_metadata_.Clear(); +} + +bool CommonResponse::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:configserver.proto.v2.CommonResponse) + for (;;) { + ::std::pair<::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch 
(::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // int32 status = 1; + case 1: { + if (static_cast< ::google::protobuf::uint8>(tag) == + static_cast< ::google::protobuf::uint8>(8u /* 8 & 0xFF */)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>( + input, &status_))); + } else { + goto handle_unusual; + } + break; + } + + // bytes errorMessage = 2; + case 2: { + if (static_cast< ::google::protobuf::uint8>(tag) == + static_cast< ::google::protobuf::uint8>(18u /* 18 & 0xFF */)) { + DO_(::google::protobuf::internal::WireFormatLite::ReadBytes( + input, this->mutable_errormessage())); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:configserver.proto.v2.CommonResponse) + return true; +failure: + // @@protoc_insertion_point(parse_failure:configserver.proto.v2.CommonResponse) + return false; +#undef DO_ +} + +void CommonResponse::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:configserver.proto.v2.CommonResponse) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 status = 1; + if (this->status() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->status(), output); + } + + // bytes errorMessage = 2; + if (this->errormessage().size() > 0) { + ::google::protobuf::internal::WireFormatLite::WriteBytesMaybeAliased( + 2, this->errormessage(), output); + } + + if ((_internal_metadata_.have_unknown_fields() && ::google::protobuf::internal::GetProto3PreserveUnknownsDefault())) { + 
::google::protobuf::internal::WireFormat::SerializeUnknownFields( + (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()), output); + } + // @@protoc_insertion_point(serialize_end:configserver.proto.v2.CommonResponse) +} + +::google::protobuf::uint8* CommonResponse::InternalSerializeWithCachedSizesToArray( + bool deterministic, ::google::protobuf::uint8* target) const { + (void)deterministic; // Unused + // @@protoc_insertion_point(serialize_to_array_start:configserver.proto.v2.CommonResponse) + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // int32 status = 1; + if (this->status() != 0) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->status(), target); + } + + // bytes errorMessage = 2; + if (this->errormessage().size() > 0) { + target = + ::google::protobuf::internal::WireFormatLite::WriteBytesToArray( + 2, this->errormessage(), target); + } + + if ((_internal_metadata_.have_unknown_fields() && ::google::protobuf::internal::GetProto3PreserveUnknownsDefault())) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()), target); + } + // @@protoc_insertion_point(serialize_to_array_end:configserver.proto.v2.CommonResponse) + return target; +} + +size_t CommonResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:configserver.proto.v2.CommonResponse) + size_t total_size = 0; + + if ((_internal_metadata_.have_unknown_fields() && ::google::protobuf::internal::GetProto3PreserveUnknownsDefault())) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? 
_internal_metadata_.unknown_fields() : _internal_metadata_.default_instance())); + } + // bytes errorMessage = 2; + if (this->errormessage().size() > 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::BytesSize( + this->errormessage()); + } + + // int32 status = 1; + if (this->status() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int32Size( + this->status()); + } + + int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void CommonResponse::MergeFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:configserver.proto.v2.CommonResponse) + GOOGLE_DCHECK_NE(&from, this); + const CommonResponse* source = + ::google::protobuf::internal::DynamicCastToGenerated( + &from); + if (source == NULL) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:configserver.proto.v2.CommonResponse) + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:configserver.proto.v2.CommonResponse) + MergeFrom(*source); + } +} + +void CommonResponse::MergeFrom(const CommonResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:configserver.proto.v2.CommonResponse) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::google::protobuf::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.errormessage().size() > 0) { + + errormessage_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.errormessage_); + } + if (from.status() != 0) { + set_status(from.status()); + } +} + +void CommonResponse::CopyFrom(const ::google::protobuf::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:configserver.proto.v2.CommonResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void 
CommonResponse::CopyFrom(const CommonResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:configserver.proto.v2.CommonResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool CommonResponse::IsInitialized() const { + return true; +} + +void CommonResponse::Swap(CommonResponse* other) { + if (other == this) return; + InternalSwap(other); +} +void CommonResponse::InternalSwap(CommonResponse* other) { + using std::swap; + errormessage_.Swap(&other->errormessage_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); + swap(status_, other->status_); + _internal_metadata_.Swap(&other->_internal_metadata_); +} + +::google::protobuf::Metadata CommonResponse::GetMetadata() const { + protobuf_v2_2fagent_2eproto::protobuf_AssignDescriptorsOnce(); + return ::protobuf_v2_2fagent_2eproto::file_level_metadata[kIndexInFileMessages]; +} + + // @@protoc_insertion_point(namespace_scope) } // namespace v2 } // namespace proto @@ -5233,9 +5220,6 @@ template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE ::configserver::proto::v2::ConfigD template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE ::configserver::proto::v2::CommandDetail* Arena::CreateMaybeMessage< ::configserver::proto::v2::CommandDetail >(Arena* arena) { return Arena::CreateInternal< ::configserver::proto::v2::CommandDetail >(arena); } -template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE ::configserver::proto::v2::ServerErrorResponse* Arena::CreateMaybeMessage< ::configserver::proto::v2::ServerErrorResponse >(Arena* arena) { - return Arena::CreateInternal< ::configserver::proto::v2::ServerErrorResponse >(arena); -} template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE ::configserver::proto::v2::HeartbeatResponse* Arena::CreateMaybeMessage< ::configserver::proto::v2::HeartbeatResponse >(Arena* arena) { return Arena::CreateInternal< ::configserver::proto::v2::HeartbeatResponse >(arena); } @@ -5245,6 +5229,9 @@ template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE 
::configserver::proto::v2::FetchCo template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE ::configserver::proto::v2::FetchConfigResponse* Arena::CreateMaybeMessage< ::configserver::proto::v2::FetchConfigResponse >(Arena* arena) { return Arena::CreateInternal< ::configserver::proto::v2::FetchConfigResponse >(arena); } +template<> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE ::configserver::proto::v2::CommonResponse* Arena::CreateMaybeMessage< ::configserver::proto::v2::CommonResponse >(Arena* arena) { + return Arena::CreateInternal< ::configserver::proto::v2::CommonResponse >(arena); +} } // namespace protobuf } // namespace google diff --git a/core/config_server_pb/v2/agent.pb.h b/core/protobuf/config_server/v2/agent.pb.h similarity index 92% rename from core/config_server_pb/v2/agent.pb.h rename to core/protobuf/config_server/v2/agent.pb.h index b9d9b16ecf..b4ed3cf645 100644 --- a/core/config_server_pb/v2/agent.pb.h +++ b/core/protobuf/config_server/v2/agent.pb.h @@ -83,6 +83,9 @@ extern CommandDetailDefaultTypeInternal _CommandDetail_default_instance_; class CommandInfo; class CommandInfoDefaultTypeInternal; extern CommandInfoDefaultTypeInternal _CommandInfo_default_instance_; +class CommonResponse; +class CommonResponseDefaultTypeInternal; +extern CommonResponseDefaultTypeInternal _CommonResponse_default_instance_; class ConfigDetail; class ConfigDetailDefaultTypeInternal; extern ConfigDetailDefaultTypeInternal _ConfigDetail_default_instance_; @@ -101,9 +104,6 @@ extern HeartbeatRequestDefaultTypeInternal _HeartbeatRequest_default_instance_; class HeartbeatResponse; class HeartbeatResponseDefaultTypeInternal; extern HeartbeatResponseDefaultTypeInternal _HeartbeatResponse_default_instance_; -class ServerErrorResponse; -class ServerErrorResponseDefaultTypeInternal; -extern ServerErrorResponseDefaultTypeInternal _ServerErrorResponse_default_instance_; } // namespace v2 } // namespace proto } // namespace configserver @@ -114,13 +114,13 @@ template<> 
::configserver::proto::v2::AgentAttributes_ExtrasEntry_DoNotUse* Aren template<> ::configserver::proto::v2::AgentGroupTag* Arena::CreateMaybeMessage<::configserver::proto::v2::AgentGroupTag>(Arena*); template<> ::configserver::proto::v2::CommandDetail* Arena::CreateMaybeMessage<::configserver::proto::v2::CommandDetail>(Arena*); template<> ::configserver::proto::v2::CommandInfo* Arena::CreateMaybeMessage<::configserver::proto::v2::CommandInfo>(Arena*); +template<> ::configserver::proto::v2::CommonResponse* Arena::CreateMaybeMessage<::configserver::proto::v2::CommonResponse>(Arena*); template<> ::configserver::proto::v2::ConfigDetail* Arena::CreateMaybeMessage<::configserver::proto::v2::ConfigDetail>(Arena*); template<> ::configserver::proto::v2::ConfigInfo* Arena::CreateMaybeMessage<::configserver::proto::v2::ConfigInfo>(Arena*); template<> ::configserver::proto::v2::FetchConfigRequest* Arena::CreateMaybeMessage<::configserver::proto::v2::FetchConfigRequest>(Arena*); template<> ::configserver::proto::v2::FetchConfigResponse* Arena::CreateMaybeMessage<::configserver::proto::v2::FetchConfigResponse>(Arena*); template<> ::configserver::proto::v2::HeartbeatRequest* Arena::CreateMaybeMessage<::configserver::proto::v2::HeartbeatRequest>(Arena*); template<> ::configserver::proto::v2::HeartbeatResponse* Arena::CreateMaybeMessage<::configserver::proto::v2::HeartbeatResponse>(Arena*); -template<> ::configserver::proto::v2::ServerErrorResponse* Arena::CreateMaybeMessage<::configserver::proto::v2::ServerErrorResponse>(Arena*); } // namespace protobuf } // namespace google namespace configserver { @@ -153,7 +153,7 @@ inline bool ConfigStatus_Parse( enum AgentCapabilities { UnspecifiedAgentCapability = 0, AcceptsPipelineConfig = 1, - AcceptsProcessConfig = 2, + AcceptsInstanceConfig = 2, AcceptsCustomCommand = 4, AgentCapabilities_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min, AgentCapabilities_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max @@ -198,7 
+198,7 @@ enum ServerCapabilities { UnspecifiedServerCapability = 0, RembersAttribute = 1, RembersPipelineConfigStatus = 2, - RembersProcessConfigStatus = 4, + RembersInstanceConfigStatus = 4, RembersCustomCommandStatus = 8, ServerCapabilities_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min, ServerCapabilities_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max @@ -222,13 +222,13 @@ enum ResponseFlags { ResponseFlagsUnspecified = 0, ReportFullState = 1, FetchPipelineConfigDetail = 2, - FetchProcessConfigDetail = 4, + FetchInstanceConfigDetail = 4, ResponseFlags_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min, ResponseFlags_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max }; bool ResponseFlags_IsValid(int value); const ResponseFlags ResponseFlags_MIN = ResponseFlagsUnspecified; -const ResponseFlags ResponseFlags_MAX = FetchProcessConfigDetail; +const ResponseFlags ResponseFlags_MAX = FetchInstanceConfigDetail; const int ResponseFlags_ARRAYSIZE = ResponseFlags_MAX + 1; const ::google::protobuf::EnumDescriptor* ResponseFlags_descriptor(); @@ -946,17 +946,17 @@ class HeartbeatRequest : public ::google::protobuf::Message /* @@protoc_insertio const ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo >& pipeline_configs() const; - // repeated .configserver.proto.v2.ConfigInfo process_configs = 11; - int process_configs_size() const; - void clear_process_configs(); - static const int kProcessConfigsFieldNumber = 11; - ::configserver::proto::v2::ConfigInfo* mutable_process_configs(int index); + // repeated .configserver.proto.v2.ConfigInfo instance_configs = 11; + int instance_configs_size() const; + void clear_instance_configs(); + static const int kInstanceConfigsFieldNumber = 11; + ::configserver::proto::v2::ConfigInfo* mutable_instance_configs(int index); ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo >* - mutable_process_configs(); - const 
::configserver::proto::v2::ConfigInfo& process_configs(int index) const; - ::configserver::proto::v2::ConfigInfo* add_process_configs(); + mutable_instance_configs(); + const ::configserver::proto::v2::ConfigInfo& instance_configs(int index) const; + ::configserver::proto::v2::ConfigInfo* add_instance_configs(); const ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo >& - process_configs() const; + instance_configs() const; // repeated .configserver.proto.v2.CommandInfo custom_commands = 12; int custom_commands_size() const; @@ -1082,7 +1082,7 @@ class HeartbeatRequest : public ::google::protobuf::Message /* @@protoc_insertio ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::AgentGroupTag > tags_; ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo > pipeline_configs_; - ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo > process_configs_; + ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo > instance_configs_; ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::CommandInfo > custom_commands_; ::google::protobuf::internal::ArenaStringPtr request_id_; ::google::protobuf::internal::ArenaStringPtr instance_id_; @@ -1380,124 +1380,6 @@ class CommandDetail : public ::google::protobuf::Message /* @@protoc_insertion_p }; // ------------------------------------------------------------------- -class ServerErrorResponse : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:configserver.proto.v2.ServerErrorResponse) */ { - public: - ServerErrorResponse(); - virtual ~ServerErrorResponse(); - - ServerErrorResponse(const ServerErrorResponse& from); - - inline ServerErrorResponse& operator=(const ServerErrorResponse& from) { - CopyFrom(from); - return *this; - } - #if LANG_CXX11 - ServerErrorResponse(ServerErrorResponse&& from) noexcept - : 
ServerErrorResponse() { - *this = ::std::move(from); - } - - inline ServerErrorResponse& operator=(ServerErrorResponse&& from) noexcept { - if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { - if (this != &from) InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - #endif - static const ::google::protobuf::Descriptor* descriptor(); - static const ServerErrorResponse& default_instance(); - - static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY - static inline const ServerErrorResponse* internal_default_instance() { - return reinterpret_cast( - &_ServerErrorResponse_default_instance_); - } - static constexpr int kIndexInFileMessages = - 8; - - void Swap(ServerErrorResponse* other); - friend void swap(ServerErrorResponse& a, ServerErrorResponse& b) { - a.Swap(&b); - } - - // implements Message ---------------------------------------------- - - inline ServerErrorResponse* New() const final { - return CreateMaybeMessage(NULL); - } - - ServerErrorResponse* New(::google::protobuf::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - void CopyFrom(const ::google::protobuf::Message& from) final; - void MergeFrom(const ::google::protobuf::Message& from) final; - void CopyFrom(const ServerErrorResponse& from); - void MergeFrom(const ServerErrorResponse& from); - void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - bool MergePartialFromCodedStream( - ::google::protobuf::io::CodedInputStream* input) final; - void SerializeWithCachedSizes( - ::google::protobuf::io::CodedOutputStream* output) const final; - ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( - bool deterministic, ::google::protobuf::uint8* target) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(ServerErrorResponse* other); - private: - inline 
::google::protobuf::Arena* GetArenaNoVirtual() const { - return NULL; - } - inline void* MaybeArenaPtr() const { - return NULL; - } - public: - - ::google::protobuf::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - // string error_message = 2; - void clear_error_message(); - static const int kErrorMessageFieldNumber = 2; - const ::std::string& error_message() const; - void set_error_message(const ::std::string& value); - #if LANG_CXX11 - void set_error_message(::std::string&& value); - #endif - void set_error_message(const char* value); - void set_error_message(const char* value, size_t size); - ::std::string* mutable_error_message(); - ::std::string* release_error_message(); - void set_allocated_error_message(::std::string* error_message); - - // int32 error_code = 1; - void clear_error_code(); - static const int kErrorCodeFieldNumber = 1; - ::google::protobuf::int32 error_code() const; - void set_error_code(::google::protobuf::int32 value); - - // @@protoc_insertion_point(class_scope:configserver.proto.v2.ServerErrorResponse) - private: - - ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; - ::google::protobuf::internal::ArenaStringPtr error_message_; - ::google::protobuf::int32 error_code_; - mutable ::google::protobuf::internal::CachedSize _cached_size_; - friend struct ::protobuf_v2_2fagent_2eproto::TableStruct; -}; -// ------------------------------------------------------------------- - class HeartbeatResponse : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:configserver.proto.v2.HeartbeatResponse) */ { public: HeartbeatResponse(); @@ -1533,7 +1415,7 @@ class HeartbeatResponse : public ::google::protobuf::Message /* @@protoc_inserti &_HeartbeatResponse_default_instance_); } static constexpr int kIndexInFileMessages = - 9; + 8; void Swap(HeartbeatResponse* other); 
friend void swap(HeartbeatResponse& a, HeartbeatResponse& b) { @@ -1597,17 +1479,17 @@ class HeartbeatResponse : public ::google::protobuf::Message /* @@protoc_inserti const ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail >& pipeline_config_updates() const; - // repeated .configserver.proto.v2.ConfigDetail process_config_updates = 5; - int process_config_updates_size() const; - void clear_process_config_updates(); - static const int kProcessConfigUpdatesFieldNumber = 5; - ::configserver::proto::v2::ConfigDetail* mutable_process_config_updates(int index); + // repeated .configserver.proto.v2.ConfigDetail instance_config_updates = 5; + int instance_config_updates_size() const; + void clear_instance_config_updates(); + static const int kInstanceConfigUpdatesFieldNumber = 5; + ::configserver::proto::v2::ConfigDetail* mutable_instance_config_updates(int index); ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail >* - mutable_process_config_updates(); - const ::configserver::proto::v2::ConfigDetail& process_config_updates(int index) const; - ::configserver::proto::v2::ConfigDetail* add_process_config_updates(); + mutable_instance_config_updates(); + const ::configserver::proto::v2::ConfigDetail& instance_config_updates(int index) const; + ::configserver::proto::v2::ConfigDetail* add_instance_config_updates(); const ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail >& - process_config_updates() const; + instance_config_updates() const; // repeated .configserver.proto.v2.CommandDetail custom_command_updates = 6; int custom_command_updates_size() const; @@ -1649,17 +1531,17 @@ class HeartbeatResponse : public ::google::protobuf::Message /* @@protoc_inserti ::std::string* release_opaque(); void set_allocated_opaque(::std::string* opaque); - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - bool has_error_response() const; - void clear_error_response(); - static const int 
kErrorResponseFieldNumber = 2; + // .configserver.proto.v2.CommonResponse commonResponse = 2; + bool has_commonresponse() const; + void clear_commonresponse(); + static const int kCommonResponseFieldNumber = 2; private: - const ::configserver::proto::v2::ServerErrorResponse& _internal_error_response() const; + const ::configserver::proto::v2::CommonResponse& _internal_commonresponse() const; public: - const ::configserver::proto::v2::ServerErrorResponse& error_response() const; - ::configserver::proto::v2::ServerErrorResponse* release_error_response(); - ::configserver::proto::v2::ServerErrorResponse* mutable_error_response(); - void set_allocated_error_response(::configserver::proto::v2::ServerErrorResponse* error_response); + const ::configserver::proto::v2::CommonResponse& commonresponse() const; + ::configserver::proto::v2::CommonResponse* release_commonresponse(); + ::configserver::proto::v2::CommonResponse* mutable_commonresponse(); + void set_allocated_commonresponse(::configserver::proto::v2::CommonResponse* commonresponse); // uint64 capabilities = 3; void clear_capabilities(); @@ -1678,11 +1560,11 @@ class HeartbeatResponse : public ::google::protobuf::Message /* @@protoc_inserti ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail > pipeline_config_updates_; - ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail > process_config_updates_; + ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail > instance_config_updates_; ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::CommandDetail > custom_command_updates_; ::google::protobuf::internal::ArenaStringPtr request_id_; ::google::protobuf::internal::ArenaStringPtr opaque_; - ::configserver::proto::v2::ServerErrorResponse* error_response_; + ::configserver::proto::v2::CommonResponse* commonresponse_; ::google::protobuf::uint64 capabilities_; 
::google::protobuf::uint64 flags_; mutable ::google::protobuf::internal::CachedSize _cached_size_; @@ -1725,7 +1607,7 @@ class FetchConfigRequest : public ::google::protobuf::Message /* @@protoc_insert &_FetchConfigRequest_default_instance_); } static constexpr int kIndexInFileMessages = - 10; + 9; void Swap(FetchConfigRequest* other); friend void swap(FetchConfigRequest& a, FetchConfigRequest& b) { @@ -1864,7 +1746,7 @@ class FetchConfigResponse : public ::google::protobuf::Message /* @@protoc_inser &_FetchConfigResponse_default_instance_); } static constexpr int kIndexInFileMessages = - 11; + 10; void Swap(FetchConfigResponse* other); friend void swap(FetchConfigResponse& a, FetchConfigResponse& b) { @@ -1942,17 +1824,17 @@ class FetchConfigResponse : public ::google::protobuf::Message /* @@protoc_inser ::std::string* release_request_id(); void set_allocated_request_id(::std::string* request_id); - // .configserver.proto.v2.ServerErrorResponse error_response = 2; - bool has_error_response() const; - void clear_error_response(); - static const int kErrorResponseFieldNumber = 2; + // .configserver.proto.v2.CommonResponse commonResponse = 2; + bool has_commonresponse() const; + void clear_commonresponse(); + static const int kCommonResponseFieldNumber = 2; private: - const ::configserver::proto::v2::ServerErrorResponse& _internal_error_response() const; + const ::configserver::proto::v2::CommonResponse& _internal_commonresponse() const; public: - const ::configserver::proto::v2::ServerErrorResponse& error_response() const; - ::configserver::proto::v2::ServerErrorResponse* release_error_response(); - ::configserver::proto::v2::ServerErrorResponse* mutable_error_response(); - void set_allocated_error_response(::configserver::proto::v2::ServerErrorResponse* error_response); + const ::configserver::proto::v2::CommonResponse& commonresponse() const; + ::configserver::proto::v2::CommonResponse* release_commonresponse(); + ::configserver::proto::v2::CommonResponse* 
mutable_commonresponse(); + void set_allocated_commonresponse(::configserver::proto::v2::CommonResponse* commonresponse); // @@protoc_insertion_point(class_scope:configserver.proto.v2.FetchConfigResponse) private: @@ -1960,7 +1842,125 @@ class FetchConfigResponse : public ::google::protobuf::Message /* @@protoc_inser ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail > config_details_; ::google::protobuf::internal::ArenaStringPtr request_id_; - ::configserver::proto::v2::ServerErrorResponse* error_response_; + ::configserver::proto::v2::CommonResponse* commonresponse_; + mutable ::google::protobuf::internal::CachedSize _cached_size_; + friend struct ::protobuf_v2_2fagent_2eproto::TableStruct; +}; +// ------------------------------------------------------------------- + +class CommonResponse : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:configserver.proto.v2.CommonResponse) */ { + public: + CommonResponse(); + virtual ~CommonResponse(); + + CommonResponse(const CommonResponse& from); + + inline CommonResponse& operator=(const CommonResponse& from) { + CopyFrom(from); + return *this; + } + #if LANG_CXX11 + CommonResponse(CommonResponse&& from) noexcept + : CommonResponse() { + *this = ::std::move(from); + } + + inline CommonResponse& operator=(CommonResponse&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + #endif + static const ::google::protobuf::Descriptor* descriptor(); + static const CommonResponse& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const CommonResponse* internal_default_instance() { + return reinterpret_cast( + &_CommonResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 11; + + void Swap(CommonResponse* 
other); + friend void swap(CommonResponse& a, CommonResponse& b) { + a.Swap(&b); + } + + // implements Message ---------------------------------------------- + + inline CommonResponse* New() const final { + return CreateMaybeMessage(NULL); + } + + CommonResponse* New(::google::protobuf::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const ::google::protobuf::Message& from) final; + void MergeFrom(const ::google::protobuf::Message& from) final; + void CopyFrom(const CommonResponse& from); + void MergeFrom(const CommonResponse& from); + void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) final; + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const final; + ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( + bool deterministic, ::google::protobuf::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CommonResponse* other); + private: + inline ::google::protobuf::Arena* GetArenaNoVirtual() const { + return NULL; + } + inline void* MaybeArenaPtr() const { + return NULL; + } + public: + + ::google::protobuf::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // bytes errorMessage = 2; + void clear_errormessage(); + static const int kErrorMessageFieldNumber = 2; + const ::std::string& errormessage() const; + void set_errormessage(const ::std::string& value); + #if LANG_CXX11 + void set_errormessage(::std::string&& value); + #endif + void set_errormessage(const char* value); + void set_errormessage(const void* value, size_t size); + ::std::string* 
mutable_errormessage(); + ::std::string* release_errormessage(); + void set_allocated_errormessage(::std::string* errormessage); + + // int32 status = 1; + void clear_status(); + static const int kStatusFieldNumber = 1; + ::google::protobuf::int32 status() const; + void set_status(::google::protobuf::int32 value); + + // @@protoc_insertion_point(class_scope:configserver.proto.v2.CommonResponse) + private: + + ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; + ::google::protobuf::internal::ArenaStringPtr errormessage_; + ::google::protobuf::int32 status_; mutable ::google::protobuf::internal::CachedSize _cached_size_; friend struct ::protobuf_v2_2fagent_2eproto::TableStruct; }; @@ -2951,34 +2951,34 @@ HeartbeatRequest::pipeline_configs() const { return pipeline_configs_; } -// repeated .configserver.proto.v2.ConfigInfo process_configs = 11; -inline int HeartbeatRequest::process_configs_size() const { - return process_configs_.size(); +// repeated .configserver.proto.v2.ConfigInfo instance_configs = 11; +inline int HeartbeatRequest::instance_configs_size() const { + return instance_configs_.size(); } -inline void HeartbeatRequest::clear_process_configs() { - process_configs_.Clear(); +inline void HeartbeatRequest::clear_instance_configs() { + instance_configs_.Clear(); } -inline ::configserver::proto::v2::ConfigInfo* HeartbeatRequest::mutable_process_configs(int index) { - // @@protoc_insertion_point(field_mutable:configserver.proto.v2.HeartbeatRequest.process_configs) - return process_configs_.Mutable(index); +inline ::configserver::proto::v2::ConfigInfo* HeartbeatRequest::mutable_instance_configs(int index) { + // @@protoc_insertion_point(field_mutable:configserver.proto.v2.HeartbeatRequest.instance_configs) + return instance_configs_.Mutable(index); } inline ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo >* -HeartbeatRequest::mutable_process_configs() { - // 
@@protoc_insertion_point(field_mutable_list:configserver.proto.v2.HeartbeatRequest.process_configs) - return &process_configs_; +HeartbeatRequest::mutable_instance_configs() { + // @@protoc_insertion_point(field_mutable_list:configserver.proto.v2.HeartbeatRequest.instance_configs) + return &instance_configs_; } -inline const ::configserver::proto::v2::ConfigInfo& HeartbeatRequest::process_configs(int index) const { - // @@protoc_insertion_point(field_get:configserver.proto.v2.HeartbeatRequest.process_configs) - return process_configs_.Get(index); +inline const ::configserver::proto::v2::ConfigInfo& HeartbeatRequest::instance_configs(int index) const { + // @@protoc_insertion_point(field_get:configserver.proto.v2.HeartbeatRequest.instance_configs) + return instance_configs_.Get(index); } -inline ::configserver::proto::v2::ConfigInfo* HeartbeatRequest::add_process_configs() { - // @@protoc_insertion_point(field_add:configserver.proto.v2.HeartbeatRequest.process_configs) - return process_configs_.Add(); +inline ::configserver::proto::v2::ConfigInfo* HeartbeatRequest::add_instance_configs() { + // @@protoc_insertion_point(field_add:configserver.proto.v2.HeartbeatRequest.instance_configs) + return instance_configs_.Add(); } inline const ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigInfo >& -HeartbeatRequest::process_configs() const { - // @@protoc_insertion_point(field_list:configserver.proto.v2.HeartbeatRequest.process_configs) - return process_configs_; +HeartbeatRequest::instance_configs() const { + // @@protoc_insertion_point(field_list:configserver.proto.v2.HeartbeatRequest.instance_configs) + return instance_configs_; } // repeated .configserver.proto.v2.CommandInfo custom_commands = 12; @@ -3381,77 +3381,6 @@ inline void CommandDetail::set_expire_time(::google::protobuf::int64 value) { // ------------------------------------------------------------------- -// ServerErrorResponse - -// int32 error_code = 1; -inline void 
ServerErrorResponse::clear_error_code() { - error_code_ = 0; -} -inline ::google::protobuf::int32 ServerErrorResponse::error_code() const { - // @@protoc_insertion_point(field_get:configserver.proto.v2.ServerErrorResponse.error_code) - return error_code_; -} -inline void ServerErrorResponse::set_error_code(::google::protobuf::int32 value) { - - error_code_ = value; - // @@protoc_insertion_point(field_set:configserver.proto.v2.ServerErrorResponse.error_code) -} - -// string error_message = 2; -inline void ServerErrorResponse::clear_error_message() { - error_message_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} -inline const ::std::string& ServerErrorResponse::error_message() const { - // @@protoc_insertion_point(field_get:configserver.proto.v2.ServerErrorResponse.error_message) - return error_message_.GetNoArena(); -} -inline void ServerErrorResponse::set_error_message(const ::std::string& value) { - - error_message_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); - // @@protoc_insertion_point(field_set:configserver.proto.v2.ServerErrorResponse.error_message) -} -#if LANG_CXX11 -inline void ServerErrorResponse::set_error_message(::std::string&& value) { - - error_message_.SetNoArena( - &::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); - // @@protoc_insertion_point(field_set_rvalue:configserver.proto.v2.ServerErrorResponse.error_message) -} -#endif -inline void ServerErrorResponse::set_error_message(const char* value) { - GOOGLE_DCHECK(value != NULL); - - error_message_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:configserver.proto.v2.ServerErrorResponse.error_message) -} -inline void ServerErrorResponse::set_error_message(const char* value, size_t size) { - - error_message_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), - 
::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:configserver.proto.v2.ServerErrorResponse.error_message) -} -inline ::std::string* ServerErrorResponse::mutable_error_message() { - - // @@protoc_insertion_point(field_mutable:configserver.proto.v2.ServerErrorResponse.error_message) - return error_message_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} -inline ::std::string* ServerErrorResponse::release_error_message() { - // @@protoc_insertion_point(field_release:configserver.proto.v2.ServerErrorResponse.error_message) - - return error_message_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); -} -inline void ServerErrorResponse::set_allocated_error_message(::std::string* error_message) { - if (error_message != NULL) { - - } else { - - } - error_message_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), error_message); - // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.ServerErrorResponse.error_message) -} - -// ------------------------------------------------------------------- - // HeartbeatResponse // bytes request_id = 1; @@ -3507,58 +3436,58 @@ inline void HeartbeatResponse::set_allocated_request_id(::std::string* request_i // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.HeartbeatResponse.request_id) } -// .configserver.proto.v2.ServerErrorResponse error_response = 2; -inline bool HeartbeatResponse::has_error_response() const { - return this != internal_default_instance() && error_response_ != NULL; +// .configserver.proto.v2.CommonResponse commonResponse = 2; +inline bool HeartbeatResponse::has_commonresponse() const { + return this != internal_default_instance() && commonresponse_ != NULL; } -inline void HeartbeatResponse::clear_error_response() { - if (GetArenaNoVirtual() == NULL && error_response_ != NULL) { - delete error_response_; +inline void 
HeartbeatResponse::clear_commonresponse() { + if (GetArenaNoVirtual() == NULL && commonresponse_ != NULL) { + delete commonresponse_; } - error_response_ = NULL; + commonresponse_ = NULL; } -inline const ::configserver::proto::v2::ServerErrorResponse& HeartbeatResponse::_internal_error_response() const { - return *error_response_; +inline const ::configserver::proto::v2::CommonResponse& HeartbeatResponse::_internal_commonresponse() const { + return *commonresponse_; } -inline const ::configserver::proto::v2::ServerErrorResponse& HeartbeatResponse::error_response() const { - const ::configserver::proto::v2::ServerErrorResponse* p = error_response_; - // @@protoc_insertion_point(field_get:configserver.proto.v2.HeartbeatResponse.error_response) - return p != NULL ? *p : *reinterpret_cast( - &::configserver::proto::v2::_ServerErrorResponse_default_instance_); +inline const ::configserver::proto::v2::CommonResponse& HeartbeatResponse::commonresponse() const { + const ::configserver::proto::v2::CommonResponse* p = commonresponse_; + // @@protoc_insertion_point(field_get:configserver.proto.v2.HeartbeatResponse.commonResponse) + return p != NULL ? 
*p : *reinterpret_cast( + &::configserver::proto::v2::_CommonResponse_default_instance_); } -inline ::configserver::proto::v2::ServerErrorResponse* HeartbeatResponse::release_error_response() { - // @@protoc_insertion_point(field_release:configserver.proto.v2.HeartbeatResponse.error_response) +inline ::configserver::proto::v2::CommonResponse* HeartbeatResponse::release_commonresponse() { + // @@protoc_insertion_point(field_release:configserver.proto.v2.HeartbeatResponse.commonResponse) - ::configserver::proto::v2::ServerErrorResponse* temp = error_response_; - error_response_ = NULL; + ::configserver::proto::v2::CommonResponse* temp = commonresponse_; + commonresponse_ = NULL; return temp; } -inline ::configserver::proto::v2::ServerErrorResponse* HeartbeatResponse::mutable_error_response() { +inline ::configserver::proto::v2::CommonResponse* HeartbeatResponse::mutable_commonresponse() { - if (error_response_ == NULL) { - auto* p = CreateMaybeMessage<::configserver::proto::v2::ServerErrorResponse>(GetArenaNoVirtual()); - error_response_ = p; + if (commonresponse_ == NULL) { + auto* p = CreateMaybeMessage<::configserver::proto::v2::CommonResponse>(GetArenaNoVirtual()); + commonresponse_ = p; } - // @@protoc_insertion_point(field_mutable:configserver.proto.v2.HeartbeatResponse.error_response) - return error_response_; + // @@protoc_insertion_point(field_mutable:configserver.proto.v2.HeartbeatResponse.commonResponse) + return commonresponse_; } -inline void HeartbeatResponse::set_allocated_error_response(::configserver::proto::v2::ServerErrorResponse* error_response) { +inline void HeartbeatResponse::set_allocated_commonresponse(::configserver::proto::v2::CommonResponse* commonresponse) { ::google::protobuf::Arena* message_arena = GetArenaNoVirtual(); if (message_arena == NULL) { - delete error_response_; + delete commonresponse_; } - if (error_response) { + if (commonresponse) { ::google::protobuf::Arena* submessage_arena = NULL; if (message_arena != submessage_arena) 
{ - error_response = ::google::protobuf::internal::GetOwnedMessage( - message_arena, error_response, submessage_arena); + commonresponse = ::google::protobuf::internal::GetOwnedMessage( + message_arena, commonresponse, submessage_arena); } } else { } - error_response_ = error_response; - // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.HeartbeatResponse.error_response) + commonresponse_ = commonresponse; + // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.HeartbeatResponse.commonResponse) } // uint64 capabilities = 3; @@ -3605,34 +3534,34 @@ HeartbeatResponse::pipeline_config_updates() const { return pipeline_config_updates_; } -// repeated .configserver.proto.v2.ConfigDetail process_config_updates = 5; -inline int HeartbeatResponse::process_config_updates_size() const { - return process_config_updates_.size(); +// repeated .configserver.proto.v2.ConfigDetail instance_config_updates = 5; +inline int HeartbeatResponse::instance_config_updates_size() const { + return instance_config_updates_.size(); } -inline void HeartbeatResponse::clear_process_config_updates() { - process_config_updates_.Clear(); +inline void HeartbeatResponse::clear_instance_config_updates() { + instance_config_updates_.Clear(); } -inline ::configserver::proto::v2::ConfigDetail* HeartbeatResponse::mutable_process_config_updates(int index) { - // @@protoc_insertion_point(field_mutable:configserver.proto.v2.HeartbeatResponse.process_config_updates) - return process_config_updates_.Mutable(index); +inline ::configserver::proto::v2::ConfigDetail* HeartbeatResponse::mutable_instance_config_updates(int index) { + // @@protoc_insertion_point(field_mutable:configserver.proto.v2.HeartbeatResponse.instance_config_updates) + return instance_config_updates_.Mutable(index); } inline ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail >* -HeartbeatResponse::mutable_process_config_updates() { - // 
@@protoc_insertion_point(field_mutable_list:configserver.proto.v2.HeartbeatResponse.process_config_updates) - return &process_config_updates_; +HeartbeatResponse::mutable_instance_config_updates() { + // @@protoc_insertion_point(field_mutable_list:configserver.proto.v2.HeartbeatResponse.instance_config_updates) + return &instance_config_updates_; } -inline const ::configserver::proto::v2::ConfigDetail& HeartbeatResponse::process_config_updates(int index) const { - // @@protoc_insertion_point(field_get:configserver.proto.v2.HeartbeatResponse.process_config_updates) - return process_config_updates_.Get(index); +inline const ::configserver::proto::v2::ConfigDetail& HeartbeatResponse::instance_config_updates(int index) const { + // @@protoc_insertion_point(field_get:configserver.proto.v2.HeartbeatResponse.instance_config_updates) + return instance_config_updates_.Get(index); } -inline ::configserver::proto::v2::ConfigDetail* HeartbeatResponse::add_process_config_updates() { - // @@protoc_insertion_point(field_add:configserver.proto.v2.HeartbeatResponse.process_config_updates) - return process_config_updates_.Add(); +inline ::configserver::proto::v2::ConfigDetail* HeartbeatResponse::add_instance_config_updates() { + // @@protoc_insertion_point(field_add:configserver.proto.v2.HeartbeatResponse.instance_config_updates) + return instance_config_updates_.Add(); } inline const ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail >& -HeartbeatResponse::process_config_updates() const { - // @@protoc_insertion_point(field_list:configserver.proto.v2.HeartbeatResponse.process_config_updates) - return process_config_updates_; +HeartbeatResponse::instance_config_updates() const { + // @@protoc_insertion_point(field_list:configserver.proto.v2.HeartbeatResponse.instance_config_updates) + return instance_config_updates_; } // repeated .configserver.proto.v2.CommandDetail custom_command_updates = 6; @@ -3929,58 +3858,58 @@ inline void 
FetchConfigResponse::set_allocated_request_id(::std::string* request // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.FetchConfigResponse.request_id) } -// .configserver.proto.v2.ServerErrorResponse error_response = 2; -inline bool FetchConfigResponse::has_error_response() const { - return this != internal_default_instance() && error_response_ != NULL; +// .configserver.proto.v2.CommonResponse commonResponse = 2; +inline bool FetchConfigResponse::has_commonresponse() const { + return this != internal_default_instance() && commonresponse_ != NULL; } -inline void FetchConfigResponse::clear_error_response() { - if (GetArenaNoVirtual() == NULL && error_response_ != NULL) { - delete error_response_; +inline void FetchConfigResponse::clear_commonresponse() { + if (GetArenaNoVirtual() == NULL && commonresponse_ != NULL) { + delete commonresponse_; } - error_response_ = NULL; + commonresponse_ = NULL; } -inline const ::configserver::proto::v2::ServerErrorResponse& FetchConfigResponse::_internal_error_response() const { - return *error_response_; +inline const ::configserver::proto::v2::CommonResponse& FetchConfigResponse::_internal_commonresponse() const { + return *commonresponse_; } -inline const ::configserver::proto::v2::ServerErrorResponse& FetchConfigResponse::error_response() const { - const ::configserver::proto::v2::ServerErrorResponse* p = error_response_; - // @@protoc_insertion_point(field_get:configserver.proto.v2.FetchConfigResponse.error_response) - return p != NULL ? *p : *reinterpret_cast( - &::configserver::proto::v2::_ServerErrorResponse_default_instance_); +inline const ::configserver::proto::v2::CommonResponse& FetchConfigResponse::commonresponse() const { + const ::configserver::proto::v2::CommonResponse* p = commonresponse_; + // @@protoc_insertion_point(field_get:configserver.proto.v2.FetchConfigResponse.commonResponse) + return p != NULL ? 
*p : *reinterpret_cast( + &::configserver::proto::v2::_CommonResponse_default_instance_); } -inline ::configserver::proto::v2::ServerErrorResponse* FetchConfigResponse::release_error_response() { - // @@protoc_insertion_point(field_release:configserver.proto.v2.FetchConfigResponse.error_response) +inline ::configserver::proto::v2::CommonResponse* FetchConfigResponse::release_commonresponse() { + // @@protoc_insertion_point(field_release:configserver.proto.v2.FetchConfigResponse.commonResponse) - ::configserver::proto::v2::ServerErrorResponse* temp = error_response_; - error_response_ = NULL; + ::configserver::proto::v2::CommonResponse* temp = commonresponse_; + commonresponse_ = NULL; return temp; } -inline ::configserver::proto::v2::ServerErrorResponse* FetchConfigResponse::mutable_error_response() { +inline ::configserver::proto::v2::CommonResponse* FetchConfigResponse::mutable_commonresponse() { - if (error_response_ == NULL) { - auto* p = CreateMaybeMessage<::configserver::proto::v2::ServerErrorResponse>(GetArenaNoVirtual()); - error_response_ = p; + if (commonresponse_ == NULL) { + auto* p = CreateMaybeMessage<::configserver::proto::v2::CommonResponse>(GetArenaNoVirtual()); + commonresponse_ = p; } - // @@protoc_insertion_point(field_mutable:configserver.proto.v2.FetchConfigResponse.error_response) - return error_response_; + // @@protoc_insertion_point(field_mutable:configserver.proto.v2.FetchConfigResponse.commonResponse) + return commonresponse_; } -inline void FetchConfigResponse::set_allocated_error_response(::configserver::proto::v2::ServerErrorResponse* error_response) { +inline void FetchConfigResponse::set_allocated_commonresponse(::configserver::proto::v2::CommonResponse* commonresponse) { ::google::protobuf::Arena* message_arena = GetArenaNoVirtual(); if (message_arena == NULL) { - delete error_response_; + delete commonresponse_; } - if (error_response) { + if (commonresponse) { ::google::protobuf::Arena* submessage_arena = NULL; if (message_arena 
!= submessage_arena) { - error_response = ::google::protobuf::internal::GetOwnedMessage( - message_arena, error_response, submessage_arena); + commonresponse = ::google::protobuf::internal::GetOwnedMessage( + message_arena, commonresponse, submessage_arena); } } else { } - error_response_ = error_response; - // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.FetchConfigResponse.error_response) + commonresponse_ = commonresponse; + // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.FetchConfigResponse.commonResponse) } // repeated .configserver.proto.v2.ConfigDetail config_details = 3; @@ -4013,6 +3942,77 @@ FetchConfigResponse::config_details() const { return config_details_; } +// ------------------------------------------------------------------- + +// CommonResponse + +// int32 status = 1; +inline void CommonResponse::clear_status() { + status_ = 0; +} +inline ::google::protobuf::int32 CommonResponse::status() const { + // @@protoc_insertion_point(field_get:configserver.proto.v2.CommonResponse.status) + return status_; +} +inline void CommonResponse::set_status(::google::protobuf::int32 value) { + + status_ = value; + // @@protoc_insertion_point(field_set:configserver.proto.v2.CommonResponse.status) +} + +// bytes errorMessage = 2; +inline void CommonResponse::clear_errormessage() { + errormessage_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline const ::std::string& CommonResponse::errormessage() const { + // @@protoc_insertion_point(field_get:configserver.proto.v2.CommonResponse.errorMessage) + return errormessage_.GetNoArena(); +} +inline void CommonResponse::set_errormessage(const ::std::string& value) { + + errormessage_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:configserver.proto.v2.CommonResponse.errorMessage) +} +#if LANG_CXX11 +inline void CommonResponse::set_errormessage(::std::string&& value) 
{ + + errormessage_.SetNoArena( + &::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:configserver.proto.v2.CommonResponse.errorMessage) +} +#endif +inline void CommonResponse::set_errormessage(const char* value) { + GOOGLE_DCHECK(value != NULL); + + errormessage_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:configserver.proto.v2.CommonResponse.errorMessage) +} +inline void CommonResponse::set_errormessage(const void* value, size_t size) { + + errormessage_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:configserver.proto.v2.CommonResponse.errorMessage) +} +inline ::std::string* CommonResponse::mutable_errormessage() { + + // @@protoc_insertion_point(field_mutable:configserver.proto.v2.CommonResponse.errorMessage) + return errormessage_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline ::std::string* CommonResponse::release_errormessage() { + // @@protoc_insertion_point(field_release:configserver.proto.v2.CommonResponse.errorMessage) + + return errormessage_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline void CommonResponse::set_allocated_errormessage(::std::string* errormessage) { + if (errormessage != NULL) { + + } else { + + } + errormessage_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), errormessage); + // @@protoc_insertion_point(field_set_allocated:configserver.proto.v2.CommonResponse.errorMessage) +} + #ifdef __GNUC__ #pragma GCC diagnostic pop #endif // __GNUC__ diff --git a/core/log_pb/RawLog.cpp b/core/protobuf/sls/RawLog.cpp similarity index 100% rename from core/log_pb/RawLog.cpp rename to core/protobuf/sls/RawLog.cpp diff --git 
a/core/log_pb/RawLog.h b/core/protobuf/sls/RawLog.h similarity index 100% rename from core/log_pb/RawLog.h rename to core/protobuf/sls/RawLog.h diff --git a/core/log_pb/RawLogGroup.cpp b/core/protobuf/sls/RawLogGroup.cpp similarity index 100% rename from core/log_pb/RawLogGroup.cpp rename to core/protobuf/sls/RawLogGroup.cpp diff --git a/core/log_pb/RawLogGroup.h b/core/protobuf/sls/RawLogGroup.h similarity index 100% rename from core/log_pb/RawLogGroup.h rename to core/protobuf/sls/RawLogGroup.h diff --git a/core/log_pb/checkpoint.proto b/core/protobuf/sls/checkpoint.proto similarity index 100% rename from core/log_pb/checkpoint.proto rename to core/protobuf/sls/checkpoint.proto diff --git a/core/log_pb/logtail_buffer_meta.proto b/core/protobuf/sls/logtail_buffer_meta.proto similarity index 100% rename from core/log_pb/logtail_buffer_meta.proto rename to core/protobuf/sls/logtail_buffer_meta.proto diff --git a/core/log_pb/metric.proto b/core/protobuf/sls/metric.proto similarity index 100% rename from core/log_pb/metric.proto rename to core/protobuf/sls/metric.proto diff --git a/core/log_pb/sls_logs.proto b/core/protobuf/sls/sls_logs.proto similarity index 100% rename from core/log_pb/sls_logs.proto rename to core/protobuf/sls/sls_logs.proto diff --git a/core/sender/FlusherRunner.cpp b/core/runner/FlusherRunner.cpp similarity index 93% rename from core/sender/FlusherRunner.cpp rename to core/runner/FlusherRunner.cpp index 1f1285111d..88316bce20 100644 --- a/core/sender/FlusherRunner.cpp +++ b/core/runner/FlusherRunner.cpp @@ -12,24 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "sender/FlusherRunner.h" +#include "runner/FlusherRunner.h" #include "app_config/AppConfig.h" #include "application/Application.h" #include "common/LogtailCommonFlags.h" #include "common/StringTools.h" -#include "flusher/sls/DiskBufferWriter.h" +#include "plugin/flusher/sls/DiskBufferWriter.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" -#include "plugin/interface/HttpFlusher.h" -#include "queue/QueueKeyManager.h" -#include "queue/SenderQueueItem.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/plugin/interface/HttpFlusher.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "pipeline/queue/SenderQueueManager.h" #include "common/http/HttpRequest.h" -#include "sink/http/HttpSink.h" +#include "runner/sink/http/HttpSink.h" // TODO: temporarily used here -#include "flusher/sls/PackIdManager.h" -#include "flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/PackIdManager.h" +#include "plugin/flusher/sls/SLSClientManager.h" using namespace std; diff --git a/core/sender/FlusherRunner.h b/core/runner/FlusherRunner.h similarity index 93% rename from core/sender/FlusherRunner.h rename to core/runner/FlusherRunner.h index 40ac6f52f1..fb7347cddb 100644 --- a/core/sender/FlusherRunner.h +++ b/core/runner/FlusherRunner.h @@ -20,9 +20,9 @@ #include #include -#include "plugin/interface/Flusher.h" -#include "queue/SenderQueueItem.h" -#include "sink/SinkType.h" +#include "pipeline/plugin/interface/Flusher.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "runner/sink/SinkType.h" namespace logtail { diff --git a/core/processor/daemon/LogProcess.cpp b/core/runner/LogProcess.cpp similarity index 97% rename from core/processor/daemon/LogProcess.cpp rename to core/runner/LogProcess.cpp index 6c4b47d7e5..08b4dfb47c 100644 --- a/core/processor/daemon/LogProcess.cpp +++ b/core/runner/LogProcess.cpp @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions 
and // limitations under the License. -#include "processor/daemon/LogProcess.h" +#include "runner/LogProcess.h" #include "app_config/AppConfig.h" -#include "batch/TimeoutFlushManager.h" +#include "pipeline/batch/TimeoutFlushManager.h" #include "common/Flags.h" #include "go_pipeline/LogtailPlugin.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "pipeline/PipelineManager.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" DECLARE_FLAG_INT32(max_send_log_group_size); @@ -137,7 +137,7 @@ bool LogProcess::FlushOut(int32_t waitMs) { } void* LogProcess::ProcessLoop(int32_t threadNo) { - LOG_DEBUG(sLogger, ("LogProcessThread", "Start")("threadNo", threadNo)); + LOG_DEBUG(sLogger, ("runner/LogProcess.hread", "Start")("threadNo", threadNo)); static int32_t lastMergeTime = 0; static atomic_int s_processCount{0}; static atomic_long s_processBytes{0}; @@ -296,7 +296,7 @@ void* LogProcess::ProcessLoop(int32_t threadNo) { } } } - LOG_WARNING(sLogger, ("LogProcessThread", "Exit")("threadNo", threadNo)); + LOG_WARNING(sLogger, ("runner/LogProcess.hread", "Exit")("threadNo", threadNo)); return NULL; } diff --git a/core/processor/daemon/LogProcess.h b/core/runner/LogProcess.h similarity index 98% rename from core/processor/daemon/LogProcess.h rename to core/runner/LogProcess.h index 0bc9561b5b..c5ddf02c67 100644 --- a/core/processor/daemon/LogProcess.h +++ b/core/runner/LogProcess.h @@ -24,7 +24,7 @@ #include "common/Thread.h" #include "models/PipelineEventGroup.h" #include "monitor/Monitor.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/sink/Sink.h b/core/runner/sink/Sink.h similarity index 100% rename from core/sink/Sink.h rename to core/runner/sink/Sink.h diff --git 
a/core/sink/SinkType.h b/core/runner/sink/SinkType.h similarity index 100% rename from core/sink/SinkType.h rename to core/runner/sink/SinkType.h diff --git a/core/sink/http/HttpSink.cpp b/core/runner/sink/http/HttpSink.cpp similarity index 93% rename from core/sink/http/HttpSink.cpp rename to core/runner/sink/http/HttpSink.cpp index 932037e6aa..dd1103b52e 100644 --- a/core/sink/http/HttpSink.cpp +++ b/core/runner/sink/http/HttpSink.cpp @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "sink/http/HttpSink.h" +#include "runner/sink/http/HttpSink.h" #include "app_config/AppConfig.h" #include "common/StringTools.h" #include "common/http/Curl.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" -#include "plugin/interface/HttpFlusher.h" -#include "queue/QueueKeyManager.h" -#include "queue/SenderQueueItem.h" -#include "sender/FlusherRunner.h" +#include "pipeline/plugin/interface/HttpFlusher.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "runner/FlusherRunner.h" using namespace std; @@ -213,6 +213,11 @@ void HttpSink::HandleCompletedRequests() { "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(msg->data.result))( "config-flusher-dst", QueueKeyManager::GetInstance()->GetName(request->mItem->mFlusher->GetQueueKey()))); + // free first,becase mPrivateData will be reset in AddRequestToClient + if (request->mPrivateData) { + curl_slist_free_all((curl_slist*)request->mPrivateData); + request->mPrivateData = nullptr; + } AddRequestToClient(unique_ptr(request)); requestReused = true; } else { @@ -222,13 +227,12 @@ void HttpSink::HandleCompletedRequests() { } break; } - - if (request->mPrivateData) { - curl_slist_free_all((curl_slist*)request->mPrivateData); - } curl_multi_remove_handle(mClient, handler); curl_easy_cleanup(handler); if (!requestReused) { + if (request->mPrivateData) { + 
curl_slist_free_all((curl_slist*)request->mPrivateData); + } delete request; } } diff --git a/core/sink/http/HttpSink.h b/core/runner/sink/http/HttpSink.h similarity index 95% rename from core/sink/http/HttpSink.h rename to core/runner/sink/http/HttpSink.h index bf1d6d871d..6e11b84855 100644 --- a/core/sink/http/HttpSink.h +++ b/core/runner/sink/http/HttpSink.h @@ -23,8 +23,8 @@ #include #include -#include "sink/Sink.h" -#include "sink/http/HttpSinkRequest.h" +#include "runner/sink/Sink.h" +#include "runner/sink/http/HttpSinkRequest.h" namespace logtail { diff --git a/core/sink/http/HttpSinkRequest.h b/core/runner/sink/http/HttpSinkRequest.h similarity index 96% rename from core/sink/http/HttpSinkRequest.h rename to core/runner/sink/http/HttpSinkRequest.h index c8d6f2140f..9f00431f0c 100644 --- a/core/sink/http/HttpSinkRequest.h +++ b/core/runner/sink/http/HttpSinkRequest.h @@ -17,7 +17,7 @@ #pragma once #include "common/http/HttpRequest.h" -#include "queue/SenderQueueItem.h" +#include "pipeline/queue/SenderQueueItem.h" namespace logtail { diff --git a/core/sdk/Client.h b/core/sdk/Client.h index 4be3f32449..6203b8350d 100644 --- a/core/sdk/Client.h +++ b/core/sdk/Client.h @@ -20,8 +20,8 @@ #include "Common.h" #include "CurlImp.h" -#include "sink/http/HttpSinkRequest.h" -#include "log_pb/sls_logs.pb.h" +#include "runner/sink/http/HttpSinkRequest.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { namespace sdk { diff --git a/core/unittest/UnittestHelper.h b/core/unittest/UnittestHelper.h index e19221fd9e..c6b5945371 100644 --- a/core/unittest/UnittestHelper.h +++ b/core/unittest/UnittestHelper.h @@ -19,7 +19,7 @@ #include #include #include "logger/Logger.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { diff --git a/core/unittest/app_config/AppConfigUnittestLegal.cpp b/core/unittest/app_config/AppConfigUnittestLegal.cpp index 5e6552969b..e95028e82b 100644 --- a/core/unittest/app_config/AppConfigUnittestLegal.cpp 
+++ b/core/unittest/app_config/AppConfigUnittestLegal.cpp @@ -18,7 +18,7 @@ #include "common/FileSystemUtil.h" #include "common/RuntimeUtil.h" #include "app_config/AppConfig.h" -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" DECLARE_FLAG_STRING(ilogtail_config); DECLARE_FLAG_INT32(logreader_filedeleted_remove_interval); diff --git a/core/unittest/batch/BatchItemUnittest.cpp b/core/unittest/batch/BatchItemUnittest.cpp index beed51e221..71d2d7fa59 100644 --- a/core/unittest/batch/BatchItemUnittest.cpp +++ b/core/unittest/batch/BatchItemUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. -#include "batch/BatchItem.h" +#include "pipeline/batch/BatchItem.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/batch/BatchStatusUnittest.cpp b/core/unittest/batch/BatchStatusUnittest.cpp index d91a8d5caf..23932bef46 100644 --- a/core/unittest/batch/BatchStatusUnittest.cpp +++ b/core/unittest/batch/BatchStatusUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. -#include "batch/BatchStatus.h" +#include "pipeline/batch/BatchStatus.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/batch/BatcherUnittest.cpp b/core/unittest/batch/BatcherUnittest.cpp index 02ed7ae581..8d40721565 100644 --- a/core/unittest/batch/BatcherUnittest.cpp +++ b/core/unittest/batch/BatcherUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. -#include "batch/Batcher.h" +#include "pipeline/batch/Batcher.h" #include "common/JsonUtil.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/batch/FlushStrategyUnittest.cpp b/core/unittest/batch/FlushStrategyUnittest.cpp index 2e043ba12e..09d1c33ab5 100644 --- a/core/unittest/batch/FlushStrategyUnittest.cpp +++ b/core/unittest/batch/FlushStrategyUnittest.cpp @@ -13,8 +13,8 @@ // limitations under the License. 
-#include "batch/BatchStatus.h" -#include "batch/FlushStrategy.h" +#include "pipeline/batch/BatchStatus.h" +#include "pipeline/batch/FlushStrategy.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/batch/TimeoutFlushManagerUnittest.cpp b/core/unittest/batch/TimeoutFlushManagerUnittest.cpp index 21732f0105..0a437d0271 100644 --- a/core/unittest/batch/TimeoutFlushManagerUnittest.cpp +++ b/core/unittest/batch/TimeoutFlushManagerUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "batch/TimeoutFlushManager.h" +#include "pipeline/batch/TimeoutFlushManager.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp b/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp index cd733ef36c..e26e676d5e 100644 --- a/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp +++ b/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp @@ -15,7 +15,7 @@ #include "unittest/Unittest.h" #include "common/Flags.h" #include "app_config/AppConfig.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "checkpoint/CheckpointManagerV2.h" DECLARE_FLAG_INT32(logtail_checkpoint_check_gc_interval_sec); diff --git a/core/unittest/compression/CompressorFactoryUnittest.cpp b/core/unittest/compression/CompressorFactoryUnittest.cpp index 2fe0365fe5..7093a37ff3 100644 --- a/core/unittest/compression/CompressorFactoryUnittest.cpp +++ b/core/unittest/compression/CompressorFactoryUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "compression/CompressorFactory.h" +#include "pipeline/compression/CompressorFactory.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/compression/LZ4CompressorUnittest.cpp b/core/unittest/compression/LZ4CompressorUnittest.cpp index 4e83e18c57..7bd62300f7 100644 --- a/core/unittest/compression/LZ4CompressorUnittest.cpp +++ b/core/unittest/compression/LZ4CompressorUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "compression/LZ4Compressor.h" +#include "pipeline/compression/LZ4Compressor.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/compression/ZstdCompressorUnittest.cpp b/core/unittest/compression/ZstdCompressorUnittest.cpp index 3bc0edbaea..6362907173 100644 --- a/core/unittest/compression/ZstdCompressorUnittest.cpp +++ b/core/unittest/compression/ZstdCompressorUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "compression/ZstdCompressor.h" +#include "pipeline/compression/ZstdCompressor.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/config/CommonConfigProviderUnittest.cpp b/core/unittest/config/CommonConfigProviderUnittest.cpp index a2b11e4a68..d68f049327 100644 --- a/core/unittest/config/CommonConfigProviderUnittest.cpp +++ b/core/unittest/config/CommonConfigProviderUnittest.cpp @@ -23,7 +23,7 @@ #include "config/watcher/ConfigWatcher.h" #include "gmock/gmock.h" #include "pipeline/PipelineManager.h" -#include "pipeline/ProcessConfigManager.h" +#include "pipeline/InstanceConfigManager.h" #include "unittest/Unittest.h" using namespace testing; @@ -78,7 +78,7 @@ class CommonConfigProviderUnittest : public ::testing::Test { provider.Init("common_v2"); provider.Stop(); bfs::remove_all(provider.mPipelineSourceDir.string()); - bfs::remove_all(provider.mProcessSourceDir.string()); + bfs::remove_all(provider.mInstanceSourceDir.string()); } // 在每个测试用例结束后的清理 @@ -87,7 +87,7 @@ class CommonConfigProviderUnittest : public ::testing::Test { provider.Init("common_v2"); provider.Stop(); bfs::remove_all(provider.mPipelineSourceDir.string()); - bfs::remove_all(provider.mProcessSourceDir.string()); + bfs::remove_all(provider.mInstanceSourceDir.string()); } void TestInit(); @@ -278,7 +278,7 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { heartbeatReq.ParseFromString(reqBody); APSARA_TEST_EQUAL(heartbeatReq.sequence_num(), sequence_num); sequence_num++; - APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsProcessConfig); + APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsInstanceConfig); APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsPipelineConfig); APSARA_TEST_EQUAL(heartbeatReq.instance_id(), provider.GetInstanceId()); APSARA_TEST_EQUAL(heartbeatReq.agent_type(), "LoongCollector"); @@ -346,9 +346,9 @@ void 
CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { )"); } { - auto processconfig = heartbeatRespPb.mutable_process_config_updates(); - auto configDetail = processconfig->Add(); - configDetail->set_name("processconfig1"); + auto instanceconfig = heartbeatRespPb.mutable_instance_config_updates(); + auto configDetail = instanceconfig->Add(); + configDetail->set_name("instanceconfig1"); configDetail->set_version(1); configDetail->set_detail(R"( { @@ -356,8 +356,8 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { "max_bytes_per_sec": 100012031023 } )"); - configDetail = processconfig->Add(); - configDetail->set_name("processconfig2"); + configDetail = instanceconfig->Add(); + configDetail->set_name("instanceconfig2"); configDetail->set_version(1); configDetail->set_detail(R"( { @@ -433,25 +433,25 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { APSARA_TEST_EQUAL(PipelineManager::GetInstance()->GetAllConfigNames()[0], "config1"); - APSARA_TEST_EQUAL(provider.mProcessConfigInfoMap.size(), 2); - APSARA_TEST_EQUAL(provider.mProcessConfigInfoMap["processconfig1"].status, ConfigFeedbackStatus::APPLYING); - APSARA_TEST_EQUAL(provider.mProcessConfigInfoMap["processconfig2"].status, ConfigFeedbackStatus::FAILED); - - // 处理 processconfig - ProcessConfigDiff processConfigDiff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(processConfigDiff); - APSARA_TEST_TRUE(!processConfigDiff.IsEmpty()); - APSARA_TEST_EQUAL(1U, processConfigDiff.mAdded.size()); - APSARA_TEST_EQUAL(processConfigDiff.mAdded[0].mName, "processconfig1"); - APSARA_TEST_EQUAL(ProcessConfigManager::GetInstance()->GetAllConfigNames().size(), 1); - APSARA_TEST_EQUAL(ProcessConfigManager::GetInstance()->GetAllConfigNames()[0], "processconfig1"); - // 再次处理 processconfig - processConfigDiff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); - 
ProcessConfigManager::GetInstance()->UpdateProcessConfigs(processConfigDiff); - APSARA_TEST_TRUE(processConfigDiff.IsEmpty()); - APSARA_TEST_TRUE(processConfigDiff.mAdded.empty()); - APSARA_TEST_EQUAL(ProcessConfigManager::GetInstance()->GetAllConfigNames().size(), 1); - APSARA_TEST_EQUAL(ProcessConfigManager::GetInstance()->GetAllConfigNames()[0], "processconfig1"); + APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap.size(), 2); + APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap["instanceconfig1"].status, ConfigFeedbackStatus::APPLYING); + APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap["instanceconfig2"].status, ConfigFeedbackStatus::FAILED); + + // 处理 instanceconfig + InstanceConfigDiff instanceConfigDiff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(instanceConfigDiff); + APSARA_TEST_TRUE(!instanceConfigDiff.IsEmpty()); + APSARA_TEST_EQUAL(1U, instanceConfigDiff.mAdded.size()); + APSARA_TEST_EQUAL(instanceConfigDiff.mAdded[0].mName, "instanceconfig1"); + APSARA_TEST_EQUAL(InstanceConfigManager::GetInstance()->GetAllConfigNames().size(), 1); + APSARA_TEST_EQUAL(InstanceConfigManager::GetInstance()->GetAllConfigNames()[0], "instanceconfig1"); + // 再次处理 instanceconfig + instanceConfigDiff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(instanceConfigDiff); + APSARA_TEST_TRUE(instanceConfigDiff.IsEmpty()); + APSARA_TEST_TRUE(instanceConfigDiff.mAdded.empty()); + APSARA_TEST_EQUAL(InstanceConfigManager::GetInstance()->GetAllConfigNames().size(), 1); + APSARA_TEST_EQUAL(InstanceConfigManager::GetInstance()->GetAllConfigNames()[0], "instanceconfig1"); provider.Stop(); } @@ -461,8 +461,8 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { provider.Init("common_v2"); APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap.size(), 1); APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap["config1"].status, 
ConfigFeedbackStatus::APPLYING); - APSARA_TEST_EQUAL(provider.mProcessConfigInfoMap.size(), 1); - APSARA_TEST_EQUAL(provider.mProcessConfigInfoMap["processconfig1"].status, ConfigFeedbackStatus::APPLYING); + APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap.size(), 1); + APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap["instanceconfig1"].status, ConfigFeedbackStatus::APPLYING); provider.Stop(); } // delete config @@ -493,7 +493,7 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { heartbeatReq.ParseFromString(reqBody); APSARA_TEST_EQUAL(heartbeatReq.sequence_num(), sequence_num); sequence_num++; - APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsProcessConfig); + APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsInstanceConfig); APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsPipelineConfig); APSARA_TEST_EQUAL(heartbeatReq.instance_id(), provider.GetInstanceId()); APSARA_TEST_EQUAL(heartbeatReq.agent_type(), "LoongCollector"); @@ -561,11 +561,11 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { } )"); } - // processconfig + // instanceconfig { - auto processconfig = heartbeatRespPb.mutable_process_config_updates(); - auto configDetail = processconfig->Add(); - configDetail->set_name("processconfig1"); + auto instanceconfig = heartbeatRespPb.mutable_instance_config_updates(); + auto configDetail = instanceconfig->Add(); + configDetail->set_name("instanceconfig1"); configDetail->set_version(-1); configDetail->set_detail(R"( { @@ -573,8 +573,8 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { "max_bytes_per_sec": 100012031023 } )"); - configDetail = processconfig->Add(); - configDetail->set_name("processconfig2"); + configDetail = instanceconfig->Add(); + configDetail->set_name("instanceconfig2"); configDetail->set_version(-1); configDetail->set_detail(R"( { @@ -646,20 +646,20 @@ void 
CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() { APSARA_TEST_TRUE(pipelineConfigDiff.mRemoved.empty()); APSARA_TEST_TRUE(PipelineManager::GetInstance()->GetAllConfigNames().empty()); - APSARA_TEST_TRUE(provider.mProcessConfigInfoMap.empty()); - // 处理processConfigDiff - ProcessConfigDiff processConfigDiff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(processConfigDiff); - APSARA_TEST_TRUE(ProcessConfigManager::GetInstance()->GetAllConfigNames().empty()); - APSARA_TEST_EQUAL(1U, processConfigDiff.mRemoved.size()); - APSARA_TEST_EQUAL(processConfigDiff.mRemoved[0], "processconfig1"); - - // 再次处理processConfigDiff - processConfigDiff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(processConfigDiff); - APSARA_TEST_TRUE(ProcessConfigManager::GetInstance()->GetAllConfigNames().empty()); - APSARA_TEST_TRUE(processConfigDiff.IsEmpty()); - APSARA_TEST_TRUE(processConfigDiff.mRemoved.empty()); + APSARA_TEST_TRUE(provider.mInstanceConfigInfoMap.empty()); + // 处理instanceConfigDiff + InstanceConfigDiff instanceConfigDiff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(instanceConfigDiff); + APSARA_TEST_TRUE(InstanceConfigManager::GetInstance()->GetAllConfigNames().empty()); + APSARA_TEST_EQUAL(1U, instanceConfigDiff.mRemoved.size()); + APSARA_TEST_EQUAL(instanceConfigDiff.mRemoved[0], "instanceconfig1"); + + // 再次处理instanceConfigDiff + instanceConfigDiff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(instanceConfigDiff); + APSARA_TEST_TRUE(InstanceConfigManager::GetInstance()->GetAllConfigNames().empty()); + APSARA_TEST_TRUE(instanceConfigDiff.IsEmpty()); + APSARA_TEST_TRUE(instanceConfigDiff.mRemoved.empty()); provider.Stop(); } diff --git 
a/core/unittest/config/ConfigContainerUnittest.cpp b/core/unittest/config/ConfigContainerUnittest.cpp index 2058fe8593..b85f6c49f9 100644 --- a/core/unittest/config/ConfigContainerUnittest.cpp +++ b/core/unittest/config/ConfigContainerUnittest.cpp @@ -21,8 +21,8 @@ #include "app_config/AppConfig.h" #include "common/Flags.h" -#include "config_manager/ConfigManager.h" -#include "event/Event.h" +#include "file_server/ConfigManager.h" +#include "file_server/event/Event.h" #include "file_server/FileServer.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/config/ConfigMatchUnittest.cpp b/core/unittest/config/ConfigMatchUnittest.cpp index 9f95bd683d..8402e4d215 100644 --- a/core/unittest/config/ConfigMatchUnittest.cpp +++ b/core/unittest/config/ConfigMatchUnittest.cpp @@ -37,14 +37,14 @@ #include #include "common/Flags.h" #include "common/FileSystemUtil.h" -#include "event_handler/EventHandler.h" -#include "polling/PollingEventQueue.h" -#include "controller/EventDispatcher.h" +#include "file_server/event_handler/EventHandler.h" +#include "file_server/polling/PollingEventQueue.h" +#include "file_server/EventDispatcher.h" #include "app_config/AppConfig.h" -#include "config_manager/ConfigManager.h" -#include "reader/LogFileReader.h" -#include "event_handler/LogInput.h" -#include "event/Event.h" +#include "file_server/ConfigManager.h" +#include "file_server/reader/LogFileReader.h" +#include "file_server/event_handler/LogInput.h" +#include "file_server/event/Event.h" #include "logger/Logger.h" using namespace std; diff --git a/core/unittest/config/ConfigUpdateUnittest.cpp b/core/unittest/config/ConfigUpdateUnittest.cpp index e74a538fc8..9f253ba11c 100644 --- a/core/unittest/config/ConfigUpdateUnittest.cpp +++ b/core/unittest/config/ConfigUpdateUnittest.cpp @@ -22,7 +22,7 @@ #include "config/watcher/ConfigWatcher.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineManager.h" -#include "plugin/PluginRegistry.h" +#include 
"pipeline/plugin/PluginRegistry.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/config/ConfigUpdatorUnittest.cpp b/core/unittest/config/ConfigUpdatorUnittest.cpp index 2044e064c4..c8c9b8fbdb 100644 --- a/core/unittest/config/ConfigUpdatorUnittest.cpp +++ b/core/unittest/config/ConfigUpdatorUnittest.cpp @@ -23,14 +23,14 @@ #include #include #include -#include "event_handler/EventHandler.h" -#include "config_manager/ConfigManager.h" -#include "reader/LogFileReader.h" +#include "file_server/event_handler/EventHandler.h" +#include "file_server/ConfigManager.h" +#include "file_server/reader/LogFileReader.h" #include "AppConfig.h" #include "Monitor.h" #include "EventDispatcher.h" #include "CheckPointManager.h" -#include "LogInput.h" +#include "file_server/event_handler/LogInput.h" #include "Sender.h" #include "sls_logs.pb.h" #include "LogtailAlarm.h" diff --git a/core/unittest/config/ConfigWatcherUnittest.cpp b/core/unittest/config/ConfigWatcherUnittest.cpp index 7562c09023..06592fd275 100644 --- a/core/unittest/config/ConfigWatcherUnittest.cpp +++ b/core/unittest/config/ConfigWatcherUnittest.cpp @@ -17,7 +17,7 @@ #include "config/ConfigDiff.h" #include "config/watcher/ConfigWatcher.h" -#include "plugin/PluginRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" #include "unittest/Unittest.h" using namespace std; @@ -33,18 +33,18 @@ class ConfigWatcherUnittest : public testing::Test { protected: void SetUp() override { ConfigWatcher::GetInstance()->AddPipelineSource(configDir.string()); - ConfigWatcher::GetInstance()->AddProcessSource(processConfigDir.string()); + ConfigWatcher::GetInstance()->AddInstanceSource(instanceConfigDir.string()); } void TearDown() override { ConfigWatcher::GetInstance()->ClearEnvironment(); } private: static const filesystem::path configDir; - static const filesystem::path processConfigDir; + static const filesystem::path instanceConfigDir; }; const filesystem::path ConfigWatcherUnittest::configDir = 
"./config"; -const filesystem::path ConfigWatcherUnittest::processConfigDir = "./processconfig"; +const filesystem::path ConfigWatcherUnittest::instanceConfigDir = "./instanceconfig"; void ConfigWatcherUnittest::InvalidConfigDirFound() const { { @@ -57,13 +57,13 @@ void ConfigWatcherUnittest::InvalidConfigDirFound() const { filesystem::remove("config"); } { - ProcessConfigDiff diff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); + InstanceConfigDiff diff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); APSARA_TEST_TRUE(diff.IsEmpty()); - { ofstream fout("processconfig"); } - diff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); + { ofstream fout("instanceconfig"); } + diff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); APSARA_TEST_TRUE(diff.IsEmpty()); - filesystem::remove("processconfig"); + filesystem::remove("instanceconfig"); } } @@ -83,18 +83,18 @@ void ConfigWatcherUnittest::InvalidConfigFileFound() const { filesystem::remove_all(configDir); } { - filesystem::create_directories(processConfigDir); + filesystem::create_directories(instanceConfigDir); - filesystem::create_directories(processConfigDir / "dir"); - { ofstream fout(processConfigDir / "unsupported_extenstion.zip"); } - { ofstream fout(processConfigDir / "empty_file.json"); } + filesystem::create_directories(instanceConfigDir / "dir"); + { ofstream fout(instanceConfigDir / "unsupported_extenstion.zip"); } + { ofstream fout(instanceConfigDir / "empty_file.json"); } { - ofstream fout(processConfigDir / "invalid_format.json"); + ofstream fout(instanceConfigDir / "invalid_format.json"); fout << "[}"; } - ProcessConfigDiff diff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); + InstanceConfigDiff diff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); APSARA_TEST_TRUE(diff.IsEmpty()); - filesystem::remove_all(processConfigDir); + filesystem::remove_all(instanceConfigDir); } } @@ -137,10 +137,10 @@ void ConfigWatcherUnittest::DuplicateConfigs() const 
{ } { PluginRegistry::GetInstance()->LoadPlugins(); - ConfigWatcher::GetInstance()->AddProcessSource("dir1"); - ConfigWatcher::GetInstance()->AddProcessSource("dir2"); + ConfigWatcher::GetInstance()->AddInstanceSource("dir1"); + ConfigWatcher::GetInstance()->AddInstanceSource("dir2"); - filesystem::create_directories("processconfig"); + filesystem::create_directories("instanceconfig"); filesystem::create_directories("dir1"); filesystem::create_directories("dir2"); @@ -156,13 +156,13 @@ void ConfigWatcherUnittest::DuplicateConfigs() const { )"; } { ofstream fout("dir2/config.json"); } - ProcessConfigDiff diff = ConfigWatcher::GetInstance()->CheckProcessConfigDiff(); + InstanceConfigDiff diff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); APSARA_TEST_FALSE(diff.IsEmpty()); APSARA_TEST_EQUAL(1U, diff.mAdded.size()); filesystem::remove_all("dir1"); filesystem::remove_all("dir2"); - filesystem::remove_all("processconfig"); + filesystem::remove_all("instanceconfig"); PluginRegistry::GetInstance()->UnloadPlugins(); } } diff --git a/core/unittest/config/PipelineConfigUnittest.cpp b/core/unittest/config/PipelineConfigUnittest.cpp index d1fcd9a9f9..577113ebc7 100644 --- a/core/unittest/config/PipelineConfigUnittest.cpp +++ b/core/unittest/config/PipelineConfigUnittest.cpp @@ -19,7 +19,7 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "plugin/PluginRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/container_manager/ContainerDiscoveryOptionsUnittest.cpp b/core/unittest/container_manager/ContainerDiscoveryOptionsUnittest.cpp index d38356cdc1..2f40343a24 100644 --- a/core/unittest/container_manager/ContainerDiscoveryOptionsUnittest.cpp +++ b/core/unittest/container_manager/ContainerDiscoveryOptionsUnittest.cpp @@ -31,7 +31,7 @@ class ContainerDiscoveryOptionsUnittest : public testing::Test { void OnSuccessfulInit() const; private: - const string 
pluginName = "test"; + const string pluginType = "test"; PipelineContext ctx; }; @@ -92,7 +92,7 @@ void ContainerDiscoveryOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new ContainerDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL("default", config->mContainerFilters.mK8sNamespaceRegex); APSARA_TEST_EQUAL("pod", config->mContainerFilters.mK8sPodRegex); APSARA_TEST_EQUAL(1U, config->mContainerFilters.mIncludeK8sLabel.size()); @@ -127,7 +127,7 @@ void ContainerDiscoveryOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new ContainerDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL("", config->mContainerFilters.mK8sNamespaceRegex); APSARA_TEST_EQUAL("", config->mContainerFilters.mK8sPodRegex); APSARA_TEST_EQUAL(0U, config->mContainerFilters.mIncludeK8sLabel.size()); diff --git a/core/unittest/controller/EventDispatcherDirUnittest.cpp b/core/unittest/controller/EventDispatcherDirUnittest.cpp index 628b296f49..f08938ac52 100644 --- a/core/unittest/controller/EventDispatcherDirUnittest.cpp +++ b/core/unittest/controller/EventDispatcherDirUnittest.cpp @@ -19,9 +19,9 @@ #include #include #include "common/Flags.h" -#include "controller/EventDispatcher.h" -#include "event/Event.h" -#include "event_handler/EventHandler.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event/Event.h" +#include "file_server/event_handler/EventHandler.h" using namespace std; DECLARE_FLAG_STRING(ilogtail_config); diff --git a/core/unittest/ebpf/eBPFServerUnittest.cpp b/core/unittest/ebpf/eBPFServerUnittest.cpp index f512485f84..03fdcd15d2 100644 --- a/core/unittest/ebpf/eBPFServerUnittest.cpp +++ 
b/core/unittest/ebpf/eBPFServerUnittest.cpp @@ -44,6 +44,8 @@ class eBPFServerUnittest : public testing::Test { void TestEbpfParameters(); + void TestInitAndStop(); + protected: void SetUp() override { config_ = new eBPFAdminConfig; @@ -73,6 +75,7 @@ class eBPFServerUnittest : public testing::Test { void GenerateBatchMeasure(nami::NamiHandleBatchMeasureFunc cb); void GenerateBatchSpan(nami::NamiHandleBatchSpanFunc cb); void GenerateBatchEvent(nami::NamiHandleBatchDataEventFn cb, SecureEventType); + void GenerateBatchAppEvent(nami::NamiHandleBatchEventFunc cb); void writeLogtailConfigJSON(const Json::Value& v) { LOG_INFO(sLogger, ("writeLogtailConfigJSON", v.toStyledString())); OverwriteFile(STRING_FLAG(ilogtail_config), v.toStyledString()); @@ -417,6 +420,26 @@ void eBPFServerUnittest::GenerateBatchMeasure(nami::NamiHandleBatchMeasureFunc c cb(std::move(batch_app_measures), 100000); } +void eBPFServerUnittest::GenerateBatchAppEvent(nami::NamiHandleBatchEventFunc cb) { + std::vector> batch_app_events; + std::vector apps = {"a6rx69e8me@582846f37273cf8", "a6rx69e8me@582846f37273cf9", "a6rx69e8me@582846f37273c10"}; + + for (int i = 0 ; i < apps.size(); i ++) { // 3 apps + std::vector> appTags = {{"hh", "hh"}, {"e", "e"}, {"f", std::to_string(i)}}; + std::unique_ptr appEvent = std::make_unique(apps[i], std::move(appTags)); + for (int j = 0; j < 1000; j ++) { + std::vector> tags = {{"1", "1"}, {"2", "2"}, {"3",std::to_string(j)}}; + std::unique_ptr se = std::make_unique(std::move(tags), 0); + appEvent->AppendEvent(std::move(se)); + } + batch_app_events.emplace_back(std::move(appEvent)); + } + + if (cb) cb(std::move(batch_app_events)); + + return; +} + void eBPFServerUnittest::GenerateBatchSpan(nami::NamiHandleBatchSpanFunc cb) { std::vector> batch_app_spans; // agg for app level @@ -493,12 +516,16 @@ void eBPFServerUnittest::TestEnableNetworkPlugin() { "Type": "input_ebpf_sockettraceprobe_observer", "ProbeConfig": { + "EnableLog": true, + "EnableMetric": true, + 
"EnableSpan": true, "EnableProtocols": [ "http" ], "DisableProtocolParse": 1, "DisableConnStats": false, - "EnableConnTrackerDump": false + "EnableConnTrackerDump": false, + "EnableEvent": true, } } )"; @@ -517,7 +544,7 @@ void eBPFServerUnittest::TestEnableNetworkPlugin() { &network_option); EXPECT_TRUE(res); - auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); auto network_conf = std::get(conf->config_); EXPECT_EQ(conf->plugin_type_, nami::PluginType::NETWORK_OBSERVE); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_ENABLE_PROBE); @@ -548,19 +575,23 @@ void eBPFServerUnittest::TestEnableNetworkPlugin() { &ctx, &network_option); EXPECT_TRUE(res); - conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::NETWORK_OBSERVE); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_CONFIG_CHAGE); GenerateBatchMeasure(network_conf.measure_cb_); GenerateBatchSpan(network_conf.span_cb_); + GenerateBatchAppEvent(network_conf.event_cb_); auto after_conf = std::get(conf->config_); EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mMeterCB->mQueueKey, ctx.GetProcessQueueKey()); EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mSpanCB->mQueueKey, ctx.GetProcessQueueKey()); + EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mEventCB->mQueueKey, ctx.GetProcessQueueKey()); EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mMeterCB->mPluginIdx, 8); EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mSpanCB->mPluginIdx, 8); + EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mEventCB->mPluginIdx, 8); EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mMeterCB->mProcessTotalCnt, 19); EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mSpanCB->mProcessTotalCnt, 5); + EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mEventCB->mProcessTotalCnt, 3000); // do stop ebpf::eBPFServer::GetInstance()->DisablePlugin("test", 
nami::PluginType::NETWORK_OBSERVE); @@ -574,49 +605,37 @@ void eBPFServerUnittest::TestEnableProcessPlugin() { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceFilter": [ - { - "NamespaceType": "Pid", - "ValueList": [ - "4026531833" - ] - }, - { - "NamespaceType": "Mnt", - "ValueList": [ - "4026531834" - ] - } + "CallNameFilter": [ + "sys_enter_execve", + "disassociate_ctty", + "acct_process", + "wake_up_new_task" ] } ] } )"; - + std::string errorMsg; Json::Value configJson; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); std::cout << "1" << std::endl; SecurityOptions security_options; - security_options.Init(SecurityFilterType::PROCESS, configJson, &ctx, "input_ebpf_processprobe_security"); + security_options.Init(SecurityProbeType::PROCESS, configJson, &ctx, "input_ebpf_processprobe_security"); bool res = ebpf::eBPFServer::GetInstance()->EnablePlugin( "test", 0, nami::PluginType::PROCESS_SECURITY, &ctx, &security_options); EXPECT_TRUE(res); - auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::PROCESS_SECURITY); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_ENABLE_PROBE); auto process_conf = std::get(conf->config_); EXPECT_TRUE(process_conf.process_security_cb_ != nullptr); LOG_WARNING(sLogger, ("process_conf.options_ size", process_conf.options_.size())); EXPECT_EQ(process_conf.options_.size(), 1); - EXPECT_EQ(process_conf.options_[0].call_names_.size(), 0); - auto filter = std::get(process_conf.options_[0].filter_); - LOG_WARNING(sLogger, ("get filter", filter.mNamespaceFilter.size())); - EXPECT_EQ(filter.mNamespaceFilter.size(), 2); - EXPECT_EQ(filter.mNamespaceBlackFilter.size(), 0); + EXPECT_EQ(process_conf.options_[0].call_names_.size(), 4); // do suspend ebpf::eBPFServer::GetInstance()->SuspendPlugin("test", nami::PluginType::PROCESS_SECURITY); @@ -630,7 +649,7 @@ 
void eBPFServerUnittest::TestEnableProcessPlugin() { &ctx, &security_options); EXPECT_TRUE(res); - conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::PROCESS_SECURITY); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_CONFIG_CHAGE); auto after_conf = std::get(conf->config_); @@ -646,7 +665,7 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80], @@ -655,7 +674,7 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { } }, { - "CallName": ["tcp_sendmsg"], + "CallNameFilter": ["tcp_sendmsg"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80] @@ -669,14 +688,14 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { Json::Value configJson; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); SecurityOptions security_options; - security_options.Init(SecurityFilterType::NETWORK, configJson, &ctx, "input_ebpf_sockettraceprobe_security"); + security_options.Init(SecurityProbeType::NETWORK, configJson, &ctx, "input_ebpf_sockettraceprobe_security"); bool res = ebpf::eBPFServer::GetInstance()->EnablePlugin( "input_ebpf_sockettraceprobe_security", 5, nami::PluginType::NETWORK_SECURITY, &ctx, &security_options); EXPECT_TRUE(res); - auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::NETWORK_SECURITY); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_ENABLE_PROBE); auto inner_conf = std::get(conf->config_); @@ -701,7 +720,7 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { 
&ctx, &security_options); EXPECT_TRUE(res); - conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::NETWORK_SECURITY); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_CONFIG_CHAGE); @@ -712,45 +731,39 @@ void eBPFServerUnittest::TestEnableNetworkSecurePlugin() { EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mNetworkSecureCB->mProcessTotalCnt, 1000); } + + void eBPFServerUnittest::TestEnableFileSecurePlugin() { std::string configStr = R"( { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], + "CallNameFilter": ["security_file_permission"], "FilePathFilter": [ - { - "FilePath": "/etc", - "FileName": "passwd" - }, - { - "FilePath": "/etc", - "FileName": "shadow" - }, - { - "FilePath": "/bin" - } + "/etc/passwd", + "/etc/shadow", + "/bin" ] } ] } )"; - + std::string errorMsg; Json::Value configJson; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); std::cout << "1" << std::endl; SecurityOptions security_options; - security_options.Init(SecurityFilterType::FILE, configJson, &ctx, "input_ebpf_fileprobe_security"); + security_options.Init(SecurityProbeType::FILE, configJson, &ctx, "input_ebpf_fileprobe_security"); bool res = ebpf::eBPFServer::GetInstance()->EnablePlugin( "input_ebpf_fileprobe_security", 0, nami::PluginType::FILE_SECURITY, &ctx, &security_options); - EXPECT_EQ(std::get(security_options.mOptionList[0].filter_).mFileFilterItem.size(), 3); + EXPECT_EQ(std::get(security_options.mOptionList[0].filter_).mFilePathList.size(), 3); EXPECT_TRUE(res); - auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + auto conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::FILE_SECURITY); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_ENABLE_PROBE); auto inner_conf = 
std::get(conf->config_); @@ -758,9 +771,9 @@ void eBPFServerUnittest::TestEnableFileSecurePlugin() { EXPECT_EQ(inner_conf.options_.size(), 1); EXPECT_EQ(inner_conf.options_[0].call_names_.size(), 1); auto filter = std::get(inner_conf.options_[0].filter_); - EXPECT_EQ(filter.mFileFilterItem.size(), 3); - EXPECT_EQ(filter.mFileFilterItem[0].mFileName, "passwd"); - EXPECT_EQ(filter.mFileFilterItem[0].mFilePath, "/etc"); + EXPECT_EQ(filter.mFilePathList.size(), 3); + EXPECT_EQ(filter.mFilePathList[0], "/etc/passwd"); + EXPECT_EQ(filter.mFilePathList[1], "/etc/shadow"); // do suspend ebpf::eBPFServer::GetInstance()->SuspendPlugin("test", nami::PluginType::FILE_SECURITY); @@ -774,7 +787,7 @@ void eBPFServerUnittest::TestEnableFileSecurePlugin() { &ctx, &security_options); EXPECT_TRUE(res); - conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig; + conf = ebpf::eBPFServer::GetInstance()->mSourceManager->mConfig.get(); EXPECT_EQ(conf->plugin_type_, nami::PluginType::FILE_SECURITY); EXPECT_EQ(conf->type, UpdataType::SECURE_UPDATE_TYPE_CONFIG_CHAGE); @@ -785,6 +798,19 @@ void eBPFServerUnittest::TestEnableFileSecurePlugin() { EXPECT_EQ(ebpf::eBPFServer::GetInstance()->mFileSecureCB->mProcessTotalCnt, 1000); } +void eBPFServerUnittest::TestInitAndStop() { + EXPECT_EQ(true, eBPFServer::GetInstance()->mInited); + eBPFServer::GetInstance()->Init(); + EXPECT_EQ(true, eBPFServer::GetInstance()->mInited); + eBPFServer::GetInstance()->Stop(); + EXPECT_EQ(false, eBPFServer::GetInstance()->mInited); + eBPFServer::GetInstance()->Stop(); + EXPECT_EQ(false, eBPFServer::GetInstance()->mInited); + EXPECT_EQ(nullptr, eBPFServer::GetInstance()->mSourceManager); + auto ret = eBPFServer::GetInstance()->HasRegisteredPlugins(); + EXPECT_EQ(false, ret); +} + UNIT_TEST_CASE(eBPFServerUnittest, TestDefaultEbpfParameters); UNIT_TEST_CASE(eBPFServerUnittest, TestDefaultAndLoadEbpfParameters); UNIT_TEST_CASE(eBPFServerUnittest, TestLoadEbpfParametersV1); @@ -794,6 +820,7 @@ 
UNIT_TEST_CASE(eBPFServerUnittest, TestEnableNetworkPlugin) UNIT_TEST_CASE(eBPFServerUnittest, TestEnableProcessPlugin) UNIT_TEST_CASE(eBPFServerUnittest, TestEnableNetworkSecurePlugin) UNIT_TEST_CASE(eBPFServerUnittest, TestEnableFileSecurePlugin) +UNIT_TEST_CASE(eBPFServerUnittest, TestInitAndStop) } } diff --git a/core/unittest/event/EventUnittest.cpp b/core/unittest/event/EventUnittest.cpp index 04befb135f..2644b4539b 100644 --- a/core/unittest/event/EventUnittest.cpp +++ b/core/unittest/event/EventUnittest.cpp @@ -19,7 +19,7 @@ #include #include #include "common/Flags.h" -#include "event/Event.h" +#include "file_server/event/Event.h" using namespace std; DECLARE_FLAG_STRING(ilogtail_config); diff --git a/core/unittest/event_handler/CreateModifyHandlerUnittest.cpp b/core/unittest/event_handler/CreateModifyHandlerUnittest.cpp index e4cfac6ade..906b432bd4 100644 --- a/core/unittest/event_handler/CreateModifyHandlerUnittest.cpp +++ b/core/unittest/event_handler/CreateModifyHandlerUnittest.cpp @@ -19,9 +19,9 @@ #include #include #include "common/Flags.h" -#include "config_manager/ConfigManager.h" -#include "event/Event.h" -#include "event_handler/EventHandler.h" +#include "file_server/ConfigManager.h" +#include "file_server/event/Event.h" +#include "file_server/event_handler/EventHandler.h" using namespace std; DECLARE_FLAG_STRING(ilogtail_config); diff --git a/core/unittest/event_handler/LogInputUnittest.cpp b/core/unittest/event_handler/LogInputUnittest.cpp index bade264e5c..17c79fb578 100644 --- a/core/unittest/event_handler/LogInputUnittest.cpp +++ b/core/unittest/event_handler/LogInputUnittest.cpp @@ -20,9 +20,9 @@ #include #include "common/Flags.h" #include "common/FileSystemUtil.h" -#include "polling/PollingEventQueue.h" -#include "event/Event.h" -#include "event_handler/LogInput.h" +#include "file_server/polling/PollingEventQueue.h" +#include "file_server/event/Event.h" +#include "file_server/event_handler/LogInput.h" using namespace std; 
DECLARE_FLAG_STRING(ilogtail_config); diff --git a/core/unittest/event_handler/ModifyHandlerUnittest.cpp b/core/unittest/event_handler/ModifyHandlerUnittest.cpp index dd2a611096..ea26051661 100644 --- a/core/unittest/event_handler/ModifyHandlerUnittest.cpp +++ b/core/unittest/event_handler/ModifyHandlerUnittest.cpp @@ -23,12 +23,12 @@ #include "common/Flags.h" #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "event/Event.h" -#include "event_handler/EventHandler.h" +#include "file_server/event/Event.h" +#include "file_server/event_handler/EventHandler.h" #include "file_server/FileServer.h" #include "pipeline/Pipeline.h" -#include "queue/ProcessQueueManager.h" -#include "reader/LogFileReader.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "file_server/reader/LogFileReader.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/file_source/FileDiscoveryOptionsUnittest.cpp b/core/unittest/file_source/FileDiscoveryOptionsUnittest.cpp index b18c555477..bed23e1854 100644 --- a/core/unittest/file_source/FileDiscoveryOptionsUnittest.cpp +++ b/core/unittest/file_source/FileDiscoveryOptionsUnittest.cpp @@ -34,7 +34,7 @@ class FileDiscoveryOptionsUnittest : public testing::Test { void TestFilePaths() const; private: - const string pluginName = "test"; + const string pluginType = "test"; PipelineContext ctx; }; @@ -54,7 +54,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); configJson["FilePaths"].append(Json::Value(filePath.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(1U, config->mFilePaths.size()); APSARA_TEST_EQUAL(0, config->mMaxDirSearchDepth); APSARA_TEST_EQUAL(-1, config->mPreservedDirDepth); @@ -80,7 +80,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { 
APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); configJson["FilePaths"].append(Json::Value(filePath.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(1U, config->mFilePaths.size()); APSARA_TEST_EQUAL(0, config->mMaxDirSearchDepth); APSARA_TEST_EQUAL(0, config->mPreservedDirDepth); @@ -106,7 +106,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); configJson["FilePaths"].append(Json::Value(filePath.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(1U, config->mFilePaths.size()); APSARA_TEST_EQUAL(0, config->mMaxDirSearchDepth); APSARA_TEST_EQUAL(-1, config->mPreservedDirDepth); @@ -132,7 +132,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { configJson["ExcludeFilePaths"].append(Json::Value(ex2.string())); configJson["ExcludeFilePaths"].append(Json::Value(ex3.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(3U, config->mExcludeFilePaths.size()); APSARA_TEST_EQUAL(1U, config->mFilePathBlacklist.size()); APSARA_TEST_EQUAL(1U, config->mMLFilePathBlacklist.size()); @@ -152,7 +152,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { configJson["ExcludeFiles"].append(Json::Value(ex1.string())); configJson["ExcludeFiles"].append(Json::Value(ex2.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(2U, config->mExcludeFiles.size()); APSARA_TEST_EQUAL(1U, 
config->mFileNameBlacklist.size()); APSARA_TEST_TRUE(config->mHasBlacklist); @@ -177,7 +177,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { configJson["ExcludeDirs"].append(Json::Value(ex4.string())); configJson["ExcludeDirs"].append(Json::Value(ex5.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(5U, config->mExcludeDirs.size()); APSARA_TEST_EQUAL(1U, config->mMLWildcardDirPathBlacklist.size()); APSARA_TEST_EQUAL(2U, config->mWildcardDirPathBlacklist.size()); @@ -194,7 +194,7 @@ void FileDiscoveryOptionsUnittest::OnSuccessfulInit() const { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); configJson["FilePaths"].append(Json::Value(filePath.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_FALSE(config->mAllowingIncludedByMultiConfigs); APSARA_TEST_TRUE(BOOL_FLAG(enable_root_path_collection)); } @@ -207,7 +207,7 @@ void FileDiscoveryOptionsUnittest::OnFailedInit() const { // no FilePaths config.reset(new FileDiscoveryOptions()); - APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginType)); // more than 1 file path configStr = R"( @@ -219,7 +219,7 @@ void FileDiscoveryOptionsUnittest::OnFailedInit() const { configJson["FilePaths"].append(Json::Value(filePath.string())); configJson["FilePaths"].append(Json::Value(filePath.string())); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginType)); // invlaid filepath filePath = filesystem::current_path(); @@ -231,7 +231,7 @@ void FileDiscoveryOptionsUnittest::OnFailedInit() const { APSARA_TEST_TRUE(ParseJsonTable(configStr, 
configJson, errorMsg)); configJson["FilePaths"].append(Json::Value(filePath.string() + filesystem::path::preferred_separator)); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginType)); } void FileDiscoveryOptionsUnittest::TestFilePaths() const { @@ -245,7 +245,7 @@ void FileDiscoveryOptionsUnittest::TestFilePaths() const { configJson["FilePaths"].append(Json::Value(filePath.string())); configJson["MaxDirSearchDepth"] = Json::Value(1); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(0, config->mMaxDirSearchDepth); APSARA_TEST_EQUAL((filesystem::current_path() / "test").string(), config->GetBasePath()); APSARA_TEST_EQUAL("*.log", config->GetFilePattern()); @@ -256,7 +256,7 @@ void FileDiscoveryOptionsUnittest::TestFilePaths() const { configJson["FilePaths"].append(Json::Value(filePath.string())); configJson["MaxDirSearchDepth"] = Json::Value(1); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(0, config->mMaxDirSearchDepth); APSARA_TEST_EQUAL((filesystem::current_path() / "*" / "test" / "?").string(), config->GetBasePath()); APSARA_TEST_EQUAL("*.log", config->GetFilePattern()); @@ -276,7 +276,7 @@ void FileDiscoveryOptionsUnittest::TestFilePaths() const { configJson["FilePaths"].append(Json::Value(filePath.string())); configJson["MaxDirSearchDepth"] = Json::Value(1); config.reset(new FileDiscoveryOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(1, config->mMaxDirSearchDepth); APSARA_TEST_EQUAL((filesystem::current_path() / "*" / "test").string(), config->GetBasePath()); 
APSARA_TEST_EQUAL("*.log", config->GetFilePattern()); diff --git a/core/unittest/file_source/MultilineOptionsUnittest.cpp b/core/unittest/file_source/MultilineOptionsUnittest.cpp index 6aa956c728..481064b659 100644 --- a/core/unittest/file_source/MultilineOptionsUnittest.cpp +++ b/core/unittest/file_source/MultilineOptionsUnittest.cpp @@ -31,7 +31,7 @@ class MultilineOptionsUnittest : public testing::Test { void OnSuccessfulInit() const; private: - const string pluginName = "test"; + const string pluginType = "test"; PipelineContext ctx; }; @@ -63,7 +63,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::CUSTOM, config->mMode); APSARA_TEST_EQUAL("\\d+:\\d+:\\d", config->mStartPattern); APSARA_TEST_EQUAL("aaa", config->mContinuePattern); @@ -86,7 +86,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::CUSTOM, config->mMode); APSARA_TEST_EQUAL("", config->mStartPattern); APSARA_TEST_EQUAL("", config->mContinuePattern); @@ -108,7 +108,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::JSON, config->mMode); APSARA_TEST_EQUAL("", config->mStartPattern); APSARA_TEST_EQUAL("", config->mContinuePattern); @@ -125,7 +125,7 @@ 
void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::CUSTOM, config->mMode); APSARA_TEST_EQUAL("\\d+:\\d+:\\d", config->mStartPattern); APSARA_TEST_EQUAL("aaa", config->mContinuePattern); @@ -142,7 +142,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::CUSTOM, config->mMode); APSARA_TEST_EQUAL(".*", config->mStartPattern); APSARA_TEST_EQUAL(".*", config->mContinuePattern); @@ -161,7 +161,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::CUSTOM, config->mMode); APSARA_TEST_EQUAL("", config->mStartPattern); APSARA_TEST_EQUAL("", config->mContinuePattern); @@ -180,7 +180,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::Mode::CUSTOM, config->mMode); APSARA_TEST_EQUAL("", config->mStartPattern); APSARA_TEST_EQUAL("aaa", config->mContinuePattern); @@ -198,7 +198,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; 
APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::UnmatchedContentTreatment::DISCARD, config->mUnmatchedContentTreatment); configStr = R"( @@ -208,7 +208,7 @@ void MultilineOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new MultilineOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(MultilineOptions::UnmatchedContentTreatment::SINGLE_LINE, config->mUnmatchedContentTreatment); } diff --git a/core/unittest/flusher/FlusherSLSUnittest.cpp b/core/unittest/flusher/FlusherSLSUnittest.cpp index 792ab88cd6..3edd9db556 100644 --- a/core/unittest/flusher/FlusherSLSUnittest.cpp +++ b/core/unittest/flusher/FlusherSLSUnittest.cpp @@ -22,17 +22,17 @@ #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif -#include "compression/CompressorFactory.h" -#include "flusher/sls/FlusherSLS.h" -#include "flusher/sls/PackIdManager.h" -#include "flusher/sls/SLSClientManager.h" +#include "pipeline/compression/CompressorFactory.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/PackIdManager.h" +#include "plugin/flusher/sls/SLSClientManager.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" -#include "queue/SLSSenderQueueItem.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SLSSenderQueueItem.h" +#include "pipeline/queue/SenderQueueManager.h" 
#include "unittest/Unittest.h" DECLARE_FLAG_INT32(batch_send_interval); @@ -56,7 +56,7 @@ class FlusherSLSUnittest : public testing::Test { void OnGoPipelineSend(); protected: - void SetUp() override { + void SetUp() override { ctx.SetConfigName("test_config"); ctx.SetPipeline(pipeline); } @@ -349,7 +349,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { { "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/4", "detail": { "EnableShardHash": false } @@ -359,11 +359,12 @@ void FlusherSLSUnittest::OnSuccessfulInit() { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); APSARA_TEST_TRUE(ParseJsonTable(optionalGoPipelineStr, optionalGoPipelineJson, errorMsg)); + pipeline.mPluginID.store(4); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(optionalGoPipelineJson == optionalGoPipeline); + APSARA_TEST_EQUAL(optionalGoPipelineJson.toStyledString(), optionalGoPipeline.toStyledString()); SenderQueueManager::GetInstance()->Clear(); } @@ -460,14 +461,12 @@ void FlusherSLSUnittest::OnFailedInit() { } void FlusherSLSUnittest::OnPipelineUpdate() { - PipelineContext ctx1, ctx2; + PipelineContext ctx1; ctx1.SetConfigName("test_config_1"); - ctx2.SetConfigName("test_config_2"); Json::Value configJson, optionalGoPipeline; - FlusherSLS flusher1, flusher2; + FlusherSLS flusher1; flusher1.SetContext(ctx1); - flusher2.SetContext(ctx2); string configStr, errorMsg; configStr = R"( @@ -481,43 +480,70 @@ void FlusherSLSUnittest::OnPipelineUpdate() { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); APSARA_TEST_TRUE(flusher1.Init(configJson, optionalGoPipeline)); - - configStr = R"( - { - "Type": "flusher_sls", - "Project": "test_project_2", - "Logstore": "test_logstore_2", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", - "Aliuid": "123456789" - } - )"; - 
APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - APSARA_TEST_TRUE(flusher2.Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(flusher1.Start()); APSARA_TEST_EQUAL(1U, FlusherSLS::sProjectRefCntMap.size()); APSARA_TEST_TRUE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); APSARA_TEST_EQUAL(1U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); - APSARA_TEST_TRUE(flusher2.Start()); - APSARA_TEST_EQUAL(2U, FlusherSLS::sProjectRefCntMap.size()); - APSARA_TEST_TRUE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); -#ifdef __ENTERPRISE__ - APSARA_TEST_EQUAL(2U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); -#else - APSARA_TEST_EQUAL(1U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); -#endif + { + PipelineContext ctx2; + ctx2.SetConfigName("test_config_2"); + FlusherSLS flusher2; + flusher2.SetContext(ctx2); + configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project_2", + "Logstore": "test_logstore_2", + "Region": "cn-hangzhou", + "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Aliuid": "123456789" + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + APSARA_TEST_TRUE(flusher2.Init(configJson, optionalGoPipeline)); + + APSARA_TEST_TRUE(flusher1.Stop(false)); + APSARA_TEST_TRUE(FlusherSLS::sProjectRefCntMap.empty()); + APSARA_TEST_FALSE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); + APSARA_TEST_TRUE(SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").empty()); + APSARA_TEST_TRUE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher1.GetQueueKey())); + + APSARA_TEST_TRUE(flusher2.Start()); + APSARA_TEST_EQUAL(1U, FlusherSLS::sProjectRefCntMap.size()); + APSARA_TEST_TRUE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); + APSARA_TEST_EQUAL(1U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); + 
APSARA_TEST_TRUE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher1.GetQueueKey())); + APSARA_TEST_FALSE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher2.GetQueueKey())); + flusher2.Stop(true); + flusher1.Start(); + } + { + PipelineContext ctx2; + ctx2.SetConfigName("test_config_1"); + FlusherSLS flusher2; + flusher2.SetContext(ctx2); + configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project", + "Logstore": "test_logstore", + "Region": "cn-hangzhou", + "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Aliuid": "123456789" + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + APSARA_TEST_TRUE(flusher2.Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(flusher2.Stop(true)); - APSARA_TEST_EQUAL(1U, FlusherSLS::sProjectRefCntMap.size()); - APSARA_TEST_TRUE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); - APSARA_TEST_EQUAL(1U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); + APSARA_TEST_TRUE(flusher1.Stop(false)); + APSARA_TEST_TRUE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher1.GetQueueKey())); - APSARA_TEST_TRUE(flusher1.Stop(true)); - APSARA_TEST_TRUE(FlusherSLS::sProjectRefCntMap.empty()); - APSARA_TEST_FALSE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); - APSARA_TEST_TRUE(SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").empty()); + APSARA_TEST_TRUE(flusher2.Start()); + APSARA_TEST_FALSE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher1.GetQueueKey())); + APSARA_TEST_FALSE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher2.GetQueueKey())); + flusher2.Stop(true); + } } void FlusherSLSUnittest::TestSend() { @@ -958,24 +984,69 @@ void FlusherSLSUnittest::TestAddPackId() { } void FlusherSLSUnittest::OnGoPipelineSend() { - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; - configStr = R"( + { + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; 
+ configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project", + "Logstore": "test_logstore", + "Region": "cn-hangzhou", + "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Aliuid": "123456789" + } + )"; + ParseJsonTable(configStr, configJson, errorMsg); + FlusherSLS flusher; + flusher.SetContext(ctx); + flusher.Init(configJson, optionalGoPipeline); { - "Type": "flusher_sls", - "Project": "test_project", - "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", - "Aliuid": "123456789" + APSARA_TEST_TRUE(flusher.Send("content", "shardhash_key", "other_logstore")); + + vector res; + SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + + APSARA_TEST_EQUAL(1U, res.size()); + auto item = static_cast(res[0]); + APSARA_TEST_EQUAL(RawDataType::EVENT_GROUP, item->mType); + APSARA_TEST_TRUE(item->mBufferOrNot); + APSARA_TEST_EQUAL(&flusher, item->mFlusher); + APSARA_TEST_EQUAL(flusher.mQueueKey, item->mQueueKey); + APSARA_TEST_EQUAL("shardhash_key", item->mShardHashKey); + APSARA_TEST_EQUAL("other_logstore", item->mLogstore); + + auto compressor + = CompressorFactory::GetInstance()->Create(Json::Value(), ctx, "flusher_sls", CompressType::LZ4); + string output; + output.resize(item->mRawSize); + APSARA_TEST_TRUE(compressor->UnCompress(item->mData, output, errorMsg)); + APSARA_TEST_EQUAL("content", output); } - )"; - ParseJsonTable(configStr, configJson, errorMsg); - FlusherSLS flusher; - flusher.SetContext(ctx); - flusher.Init(configJson, optionalGoPipeline); + { + APSARA_TEST_TRUE(flusher.Send("content", "shardhash_key", "")); + + vector res; + SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + + APSARA_TEST_EQUAL(1U, res.size()); + auto item = static_cast(res[0]); + APSARA_TEST_EQUAL("test_logstore", item->mLogstore); + } + } { - APSARA_TEST_TRUE(flusher.Send("content", "shardhash_key", "other_logstore")); + // go profile flusher has no context + FlusherSLS flusher; + flusher.mProject = 
"test_project"; + flusher.mLogstore = "test_logstore"; + flusher.mCompressor = CompressorFactory::GetInstance()->Create( + Json::Value(), PipelineContext(), "flusher_sls", CompressType::LZ4); + + APSARA_TEST_TRUE(flusher.Send("content", "")); + + auto key = QueueKeyManager::GetInstance()->GetKey("test_project-test_logstore"); + + APSARA_TEST_NOT_EQUAL(nullptr, SenderQueueManager::GetInstance()->GetQueue(key)); vector res; SenderQueueManager::GetInstance()->GetAllAvailableItems(res); @@ -985,27 +1056,17 @@ void FlusherSLSUnittest::OnGoPipelineSend() { APSARA_TEST_EQUAL(RawDataType::EVENT_GROUP, item->mType); APSARA_TEST_TRUE(item->mBufferOrNot); APSARA_TEST_EQUAL(&flusher, item->mFlusher); - APSARA_TEST_EQUAL(flusher.mQueueKey, item->mQueueKey); - APSARA_TEST_EQUAL("shardhash_key", item->mShardHashKey); - APSARA_TEST_EQUAL("other_logstore", item->mLogstore); + APSARA_TEST_EQUAL(key, item->mQueueKey); + APSARA_TEST_EQUAL("test_logstore", item->mLogstore); auto compressor = CompressorFactory::GetInstance()->Create(Json::Value(), ctx, "flusher_sls", CompressType::LZ4); string output; output.resize(item->mRawSize); + string errorMsg; APSARA_TEST_TRUE(compressor->UnCompress(item->mData, output, errorMsg)); APSARA_TEST_EQUAL("content", output); } - { - APSARA_TEST_TRUE(flusher.Send("content", "shardhash_key", "")); - - vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); - - APSARA_TEST_EQUAL(1U, res.size()); - auto item = static_cast(res[0]); - APSARA_TEST_EQUAL("test_logstore", item->mLogstore); - } } UNIT_TEST_CASE(FlusherSLSUnittest, OnSuccessfulInit) diff --git a/core/unittest/flusher/PackIdManagerUnittest.cpp b/core/unittest/flusher/PackIdManagerUnittest.cpp index becec57f89..76c03ee5ce 100644 --- a/core/unittest/flusher/PackIdManagerUnittest.cpp +++ b/core/unittest/flusher/PackIdManagerUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/sls/PackIdManager.h" +#include "plugin/flusher/sls/PackIdManager.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/input/InputContainerStdioUnittest.cpp b/core/unittest/input/InputContainerStdioUnittest.cpp index 61d7d4ceb5..94e2a89819 100644 --- a/core/unittest/input/InputContainerStdioUnittest.cpp +++ b/core/unittest/input/InputContainerStdioUnittest.cpp @@ -22,10 +22,10 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "file_server/FileServer.h" -#include "input/InputContainerStdio.h" +#include "plugin/input/InputContainerStdio.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" -#include "plugin/PluginRegistry.h" +#include "pipeline/plugin/PluginRegistry.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(default_plugin_log_queue_size); @@ -52,6 +52,7 @@ class InputContainerStdioUnittest : public testing::Test { void SetUp() override { ctx.SetConfigName("test_config"); + p.mPluginID.store(0); ctx.SetPipeline(p); } @@ -108,7 +109,6 @@ void InputContainerStdioUnittest::OnSuccessfulInit() { unique_ptr input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - uint32_t pluginIdx = 0; // only mandatory param configStr = R"( @@ -182,7 +182,10 @@ void InputContainerStdioUnittest::OnEnableContainerDiscovery() { unique_ptr input; Json::Value configJson, optionalGoPipelineJson, optionalGoPipeline; string configStr, optionalGoPipelineStr, errorMsg; - uint32_t pluginIdx = 0; + Pipeline pipeline; + pipeline.mPluginID.store(0); + ctx.SetPipeline(pipeline); + configStr = R"( { @@ -200,7 +203,7 @@ void InputContainerStdioUnittest::OnEnableContainerDiscovery() { }, "inputs": [ { - "type": "metric_container_info", + "type": "metric_container_info/2", "detail": { "K8sNamespaceRegex": "default" } @@ -211,18 +214,18 @@ void InputContainerStdioUnittest::OnEnableContainerDiscovery() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); 
APSARA_TEST_TRUE(ParseJsonTable(optionalGoPipelineStr, optionalGoPipelineJson, errorMsg)); optionalGoPipelineJson["global"]["DefaultLogQueueSize"] = Json::Value(INT32_FLAG(default_plugin_log_queue_size)); + PluginInstance::PluginMeta meta = ctx.GetPipeline().GenNextPluginMeta(false); input.reset(new InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputContainerStdio::sName, meta.mPluginID, meta.mNodeID, meta.mChildNodeID); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_TRUE(optionalGoPipelineJson == optionalGoPipeline); + APSARA_TEST_EQUAL(optionalGoPipelineJson.toStyledString(), optionalGoPipeline.toStyledString()); } void InputContainerStdioUnittest::OnPipelineUpdate() { Json::Value configJson, optionalGoPipeline; InputContainerStdio input; string configStr, errorMsg; - uint32_t pluginIdx = 0; configStr = R"( { diff --git a/core/unittest/input/InputEBPFFileSecurityUnittest.cpp b/core/unittest/input/InputEBPFFileSecurityUnittest.cpp index 8051ed20fc..bdee6ecfa5 100644 --- a/core/unittest/input/InputEBPFFileSecurityUnittest.cpp +++ b/core/unittest/input/InputEBPFFileSecurityUnittest.cpp @@ -19,7 +19,7 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "ebpf/config.h" -#include "input/InputEBPFFileSecurity.h" +#include "plugin/input/InputEBPFFileSecurity.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include "unittest/Unittest.h" @@ -61,14 +61,10 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulInit() { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], + "CallNameFilter": ["security_file_permission"], "FilePathFilter": [ - { - "FilePath": "/etc", - }, - { - "FilePath": "/bin" - } + "/etc", + "/bin" ] } ] @@ -80,10 +76,10 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(input->Init(configJson, 
optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); nami::SecurityFileFilter thisFilter1 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::FILE, input->mSecurityOptions.filter_Type); + // APSARA_TEST_EQUAL(ebpf::SecurityProbeType::FILE, input->mSecurityOptions.filter_Type); APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("/etc", thisFilter1.mFileFilterItem[0].mFilePath); - APSARA_TEST_EQUAL("/bin", thisFilter1.mFileFilterItem[1].mFilePath); + APSARA_TEST_EQUAL("/etc", thisFilter1.mFilePathList[0]); + APSARA_TEST_EQUAL("/bin", thisFilter1.mFilePathList[1]); // valid optional param configStr = R"( @@ -91,19 +87,11 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulInit() { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], + "CallNameFilter": ["security_file_permission"], "FilePathFilter": [ - { - "FilePath": "/etc", - "FileName": "passwd" - }, - { - "FilePath": "/etc", - "FileName": "shadow" - }, - { - "FilePath": "/bin" - } + "/etc/passwd", + "/etc/shadow", + "/bin" ] } ] @@ -115,13 +103,11 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); nami::SecurityFileFilter thisFilter2 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::FILE, input->mSecurityOptions.filter_Type); + // APSARA_TEST_EQUAL(ebpf::SecurityProbeType::FILE, input->mSecurityOptions.filter_Type); APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("/etc", thisFilter2.mFileFilterItem[0].mFilePath); - APSARA_TEST_EQUAL("passwd", thisFilter2.mFileFilterItem[0].mFileName); - APSARA_TEST_EQUAL("/etc", 
thisFilter2.mFileFilterItem[1].mFilePath); - APSARA_TEST_EQUAL("shadow", thisFilter2.mFileFilterItem[1].mFileName); - APSARA_TEST_EQUAL("/bin", thisFilter2.mFileFilterItem[2].mFilePath); + APSARA_TEST_EQUAL("/etc/passwd", thisFilter2.mFilePathList[0]); + APSARA_TEST_EQUAL("/etc/shadow", thisFilter2.mFilePathList[1]); + APSARA_TEST_EQUAL("/bin", thisFilter2.mFilePathList[2]); } void InputEBPFFileSecurityUnittest::OnFailedInit() { @@ -135,13 +121,8 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], - "FilePathFilter": [ - { - "FilePath": 1, - "FileName": "name" - } - ] + "CallNameFilter": ["security_file_permission"], + "FilePathFilter": [1] } ] } @@ -149,7 +130,11 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new InputEBPFFileSecurity()); input->SetContext(ctx); - APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + nami::SecurityFileFilter thisFilter = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); + APSARA_TEST_EQUAL(0, thisFilter.mFilePathList.size()); // invalid optional param configStr = R"( @@ -157,12 +142,10 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], + "CallNameFilter": ["security_file_permission"], "FilePathFilter": [ - { - "FilePath": "/etc", - "FileName": 1 - } + "/etc", + 1 ] } ] @@ -174,10 +157,9 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); 
nami::SecurityFileFilter thisFilter1 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::FILE, input->mSecurityOptions.filter_Type); + // APSARA_TEST_EQUAL(ebpf::SecurityProbeType::FILE, input->mSecurityOptions.filter_Type); APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("/etc", thisFilter1.mFileFilterItem[0].mFilePath); - APSARA_TEST_EQUAL("", thisFilter1.mFileFilterItem[0].mFileName); + APSARA_TEST_EQUAL(0, thisFilter1.mFilePathList.size()); // lose mandatory param configStr = R"( @@ -185,12 +167,6 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], - "FilePathFilter": [ - { - "FileName": "passwd" - } - ] } ] } @@ -198,16 +174,18 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new InputEBPFFileSecurity()); input->SetContext(ctx); - APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); + APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); // default callname + APSARA_TEST_EQUAL(3, input->mSecurityOptions.mOptionList[0].call_names_.size()); // default callname - // error param level + // invalid callname configStr = R"( { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], - "FileName": "passwd" + "CallNameFilter": ["security_file_permission_error"], } ] } @@ -217,10 +195,8 @@ void InputEBPFFileSecurityUnittest::OnFailedInit() { input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_fileprobe_security"); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::FILE, 
input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL("security_file_permission", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL( - 0, std::get(input->mSecurityOptions.mOptionList[0].filter_).mFileFilterItem.size()); + APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); // default callname + APSARA_TEST_EQUAL(3, input->mSecurityOptions.mOptionList[0].call_names_.size()); // default callname } void InputEBPFFileSecurityUnittest::OnSuccessfulStart() { @@ -228,20 +204,15 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulStart() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // only mandatory param configStr = R"( { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], + "CallNameFilter": ["security_file_permission"], "FilePathFilter": [ - { - "FilePath": "/etc", - }, - { - "FilePath": "/bin" - } + "/etc", + "/bin" ] } ] @@ -262,20 +233,15 @@ void InputEBPFFileSecurityUnittest::OnSuccessfulStop() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // only mandatory param configStr = R"( { "Type": "input_ebpf_fileprobe_security", "ProbeConfig": [ { - "CallName": ["security_file_permission"], + "CallNameFilter": ["security_file_permission"], "FilePathFilter": [ - { - "FilePath": "/etc", - }, - { - "FilePath": "/bin" - } + "/etc", + "/bin" ] } ] diff --git a/core/unittest/input/InputEBPFNetworkObserverUnittest.cpp b/core/unittest/input/InputEBPFNetworkObserverUnittest.cpp index d1fa58d657..068c5219d7 100644 --- a/core/unittest/input/InputEBPFNetworkObserverUnittest.cpp +++ b/core/unittest/input/InputEBPFNetworkObserverUnittest.cpp @@ -17,7 +17,7 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "ebpf/config.h" -#include "input/InputEBPFNetworkObserver.h" +#include "plugin/input/InputEBPFNetworkObserver.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include 
"unittest/Unittest.h" diff --git a/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp b/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp index ed0c08ff63..48e3eeace7 100644 --- a/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp +++ b/core/unittest/input/InputEBPFNetworkSecurityUnittest.cpp @@ -17,7 +17,7 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "ebpf/config.h" -#include "input/InputEBPFNetworkSecurity.h" +#include "plugin/input/InputEBPFNetworkSecurity.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include "unittest/Unittest.h" @@ -59,7 +59,7 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulInit() { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80], @@ -68,7 +68,7 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulInit() { } }, { - "CallName": ["tcp_sendmsg"], + "CallNameFilter": ["tcp_sendmsg"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80] @@ -82,17 +82,18 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulInit() { input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); - nami::SecurityNetworkFilter thisFilter1 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::NETWORK, input->mSecurityOptions.filter_Type); + nami::SecurityNetworkFilter thisFilter1 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); APSARA_TEST_EQUAL("tcp_close", input->mSecurityOptions.mOptionList[0].call_names_[1]); APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter1.mDestAddrList[0]); APSARA_TEST_EQUAL("92.168.0.0/16", 
thisFilter1.mDestAddrList[1]); - APSARA_TEST_EQUAL(80, thisFilter1.mDestPortList[0]); + APSARA_TEST_EQUAL(1, thisFilter1.mDestPortList.size()); APSARA_TEST_EQUAL("127.0.0.1/8", thisFilter1.mSourceAddrBlackList[0]); APSARA_TEST_EQUAL(9300, thisFilter1.mSourcePortBlackList[0]); - - nami::SecurityNetworkFilter thisFilter2 = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); + nami::SecurityNetworkFilter thisFilter2 + = std::get(input->mSecurityOptions.mOptionList[1].filter_); APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter2.mDestAddrList[0]); APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter2.mDestAddrList[1]); @@ -110,20 +111,13 @@ void InputEBPFNetworkSecurityUnittest::OnFailedInit() { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": ["80"], "SourceAddrBlackList": ["127.0.0.1/8"], "SourcePortBlackList": [9300] } - }, - { - "CallName": ["tcp_sendmsg"], - "AddrFilter": { - "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], - "DestPortList": [80] - } } ] } @@ -133,29 +127,103 @@ void InputEBPFNetworkSecurityUnittest::OnFailedInit() { input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); - nami::SecurityNetworkFilter thisFilter1 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::NETWORK, input->mSecurityOptions.filter_Type); + nami::SecurityNetworkFilter thisFilter1 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); - APSARA_TEST_EQUAL("tcp_close", 
input->mSecurityOptions.mOptionList[0].call_names_[1]); APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter1.mDestAddrList[0]); APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter1.mDestAddrList[1]); APSARA_TEST_EQUAL(0, thisFilter1.mDestPortList.size()); APSARA_TEST_EQUAL("127.0.0.1/8", thisFilter1.mSourceAddrBlackList[0]); APSARA_TEST_EQUAL(9300, thisFilter1.mSourcePortBlackList[0]); - nami::SecurityNetworkFilter thisFilter2 = std::get(input->mSecurityOptions.mOptionList[1].filter_); - APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); + // duplicate callname + configStr = R"( + { + "Type": "input_ebpf_sockettraceprobe_security", + "ProbeConfig": [ + { + "CallNameFilter": ["tcp_connect", "tcp_close"], + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [80], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + }, + { + "CallNameFilter": ["tcp_connect"], + "AddrFilter": { + "DestAddrList": ["10.0.0.1/8"], + "DestPortList": [70] + } + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); + APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); + APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); + APSARA_TEST_EQUAL("tcp_close", input->mSecurityOptions.mOptionList[0].call_names_[1]); + nami::SecurityNetworkFilter thisFilter2 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter2.mDestAddrList[0]); APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter2.mDestAddrList[1]); APSARA_TEST_EQUAL(80, thisFilter2.mDestPortList[0]); + // one duplicate callname of two + configStr = R"( + { + "Type": "input_ebpf_sockettraceprobe_security", + "ProbeConfig": [ + { + 
"CallNameFilter": ["tcp_connect", "tcp_close"], + "AddrFilter": { + "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], + "DestPortList": [80], + "SourceAddrBlackList": ["127.0.0.1/8"], + "SourcePortBlackList": [9300] + } + }, + { + "CallNameFilter": ["tcp_connect", "tcp_sendmsg"], + "AddrFilter": { + "DestAddrList": ["10.0.0.1/8"], + "DestPortList": [70] + } + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->sName, "input_ebpf_sockettraceprobe_security"); + APSARA_TEST_EQUAL(2, input->mSecurityOptions.mOptionList.size()); + APSARA_TEST_EQUAL("tcp_connect", input->mSecurityOptions.mOptionList[0].call_names_[0]); + APSARA_TEST_EQUAL("tcp_close", input->mSecurityOptions.mOptionList[0].call_names_[1]); + nami::SecurityNetworkFilter thisFilter3 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL("10.0.0.0/8", thisFilter3.mDestAddrList[0]); + APSARA_TEST_EQUAL("92.168.0.0/16", thisFilter3.mDestAddrList[1]); + APSARA_TEST_EQUAL(80, thisFilter3.mDestPortList[0]); + APSARA_TEST_EQUAL("tcp_sendmsg", input->mSecurityOptions.mOptionList[1].call_names_[0]); + nami::SecurityNetworkFilter thisFilter4 + = std::get(input->mSecurityOptions.mOptionList[1].filter_); + APSARA_TEST_EQUAL("10.0.0.1/8", thisFilter4.mDestAddrList[0]); + APSARA_TEST_EQUAL(70, thisFilter4.mDestPortList[0]); + // error param level configStr = R"( { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": ["80"], "SourceAddrBlackList": ["127.0.0.1/8"], @@ -168,18 +236,21 @@ void InputEBPFNetworkSecurityUnittest::OnFailedInit() { input.reset(new InputEBPFNetworkSecurity()); input->SetContext(ctx); 
APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - nami::SecurityNetworkFilter thisFilter3 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL(0, thisFilter3.mDestAddrList.size()); - APSARA_TEST_EQUAL(0, thisFilter3.mDestPortList.size()); + nami::SecurityNetworkFilter thisFilter5 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL(thisFilter5.mDestAddrList.size(), 0); + APSARA_TEST_EQUAL(thisFilter5.mDestPortList.size(), 0); + APSARA_TEST_EQUAL(thisFilter5.mSourceAddrBlackList.size(), 0); + APSARA_TEST_EQUAL(thisFilter5.mSourcePortBlackList.size(), 0); // valid and invalid optional param - // if the optional param in a list is invalid, the valid param will be ignored only when after it + // if the optional param in a list is invalid, the valid param after it will be read configStr = R"( { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [40, "80", 160], @@ -194,9 +265,27 @@ void InputEBPFNetworkSecurityUnittest::OnFailedInit() { input.reset(new InputEBPFNetworkSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - nami::SecurityNetworkFilter thisFilter4 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL(2, thisFilter4.mDestAddrList.size()); - APSARA_TEST_EQUAL(1, thisFilter4.mDestPortList.size()); + nami::SecurityNetworkFilter thisFilter6 + = std::get(input->mSecurityOptions.mOptionList[0].filter_); + APSARA_TEST_EQUAL(2, thisFilter6.mDestAddrList.size()); + + // invalid callname + configStr = R"( + { + "Type": "input_ebpf_sockettraceprobe_security", + "ProbeConfig": [ + { + "CallNameFilter": ["udp"], + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new 
InputEBPFNetworkSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(1, input->mSecurityOptions.mOptionList.size()); + APSARA_TEST_EQUAL(3, input->mSecurityOptions.mOptionList[0].call_names_.size()); } void InputEBPFNetworkSecurityUnittest::OnSuccessfulStart() { @@ -204,13 +293,12 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulStart() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // valid optional param configStr = R"( { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80], @@ -219,7 +307,7 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulStart() { } }, { - "CallName": ["tcp_sendmsg"], + "CallNameFilter": ["tcp_sendmsg"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80] @@ -243,13 +331,12 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulStop() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // valid optional param configStr = R"( { "Type": "input_ebpf_sockettraceprobe_security", "ProbeConfig": [ { - "CallName": ["tcp_connect", "tcp_close"], + "CallNameFilter": ["tcp_connect", "tcp_close"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80], @@ -258,7 +345,7 @@ void InputEBPFNetworkSecurityUnittest::OnSuccessfulStop() { } }, { - "CallName": ["tcp_sendmsg"], + "CallNameFilter": ["tcp_sendmsg"], "AddrFilter": { "DestAddrList": ["10.0.0.0/8","92.168.0.0/16"], "DestPortList": [80] diff --git a/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp b/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp index 625d23ef5d..7fa83255d4 100644 --- a/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp +++ b/core/unittest/input/InputEBPFProcessSecurityUnittest.cpp @@ 
-17,7 +17,7 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "ebpf/config.h" -#include "input/InputEBPFProcessSecurity.h" +#include "plugin/input/InputEBPFProcessSecurity.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include "unittest/Unittest.h" @@ -53,25 +53,17 @@ void InputEBPFProcessSecurityUnittest::OnSuccessfulInit() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // only NamespaceFilter + // valid param configStr = R"( { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceFilter": [ - { - "NamespaceType": "Pid", - "ValueList": [ - "4026531833" - ] - }, - { - "NamespaceType": "Mnt", - "ValueList": [ - "4026531834" - ] - } + "CallNameFilter": [ + "sys_enter_execve", + "disassociate_ctty", + "acct_process", + "wake_up_new_task" ] } ] @@ -82,32 +74,44 @@ void InputEBPFProcessSecurityUnittest::OnSuccessfulInit() { input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - nami::SecurityProcessFilter thisFilter1 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::PROCESS, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL("4026531833", thisFilter1.mNamespaceFilter[0].mValueList[0]); - APSARA_TEST_EQUAL("Pid", thisFilter1.mNamespaceFilter[0].mNamespaceType); - APSARA_TEST_EQUAL("4026531834", thisFilter1.mNamespaceFilter[1].mValueList[0]); - APSARA_TEST_EQUAL("Mnt", thisFilter1.mNamespaceFilter[1].mNamespaceType); + // four callnames + APSARA_TEST_EQUAL("sys_enter_execve", input->mSecurityOptions.mOptionList[0].call_names_[0]); + APSARA_TEST_EQUAL("disassociate_ctty", input->mSecurityOptions.mOptionList[0].call_names_[1]); + APSARA_TEST_EQUAL("acct_process", input->mSecurityOptions.mOptionList[0].call_names_[2]); + APSARA_TEST_EQUAL("wake_up_new_task", 
input->mSecurityOptions.mOptionList[0].call_names_[3]); + // no general filter, default is monostate + APSARA_TEST_EQUAL(std::holds_alternative(input->mSecurityOptions.mOptionList[0].filter_), true); +} + +void InputEBPFProcessSecurityUnittest::OnFailedInit() { + unique_ptr input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; - // only NamespaceBlackFilter + // no probeconfig + configStr = R"( + { + "Type": "input_ebpf_processprobe_security" + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname + + // probeconfig typo error configStr = R"( { "Type": "input_ebpf_processprobe_security", - "ProbeConfig": [ + "ProbeConfiggg": [ { - "NamespaceBlackFilter": [ - { - "NamespaceType": "Pid", - "ValueList": [ - "4026531833" - ] - }, - { - "NamespaceType": "Mnt", - "ValueList": [ - "4026531834" - ] - } + "CallNameFilter": [ + "sys_enter_execve", + "disassociate_ctty", + "acct_process", + "wake_up_new_task" ] } ] @@ -117,15 +121,31 @@ void InputEBPFProcessSecurityUnittest::OnSuccessfulInit() { input.reset(new InputEBPFProcessSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - nami::SecurityProcessFilter thisFilter2 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::PROCESS, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL("4026531833", thisFilter2.mNamespaceBlackFilter[0].mValueList[0]); - APSARA_TEST_EQUAL("Pid", thisFilter2.mNamespaceBlackFilter[0].mNamespaceType); - APSARA_TEST_EQUAL("4026531834", 
thisFilter2.mNamespaceBlackFilter[1].mValueList[0]); - APSARA_TEST_EQUAL("Mnt", thisFilter2.mNamespaceBlackFilter[1].mNamespaceType); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname + + // probeconfig type error + configStr = R"( + { + "Type": "input_ebpf_processprobe_security", + "ProbeConfig": { + "CallNameFilter": [ + "sys_enter_execve", + "disassociate_ctty", + "acct_process", + "wake_up_new_task" + ] + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - // no NamespaceFilter and NamespaceBlackFilter + // no callname configStr = R"( { "Type": "input_ebpf_processprobe_security", @@ -139,26 +159,61 @@ void InputEBPFProcessSecurityUnittest::OnSuccessfulInit() { input.reset(new InputEBPFProcessSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::PROCESS, input->mSecurityOptions.filter_Type); -} + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname -void InputEBPFProcessSecurityUnittest::OnFailedInit() { - unique_ptr input; - Json::Value configJson, optionalGoPipeline; - string configStr, errorMsg; + // callname typo error + configStr = R"( + { + "Type": "input_ebpf_processprobe_security", + "ProbeConfig": [ + { + "CallNameeee": [ + "sys_enter_execve", + "disassociate_ctty", 
+ "acct_process", + "wake_up_new_task" + ] + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - // invalid param + // callname type error configStr = R"( { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceBlackAAAAAAFilter": [ - { - "NamespaceType": "Pid", - "ValueList": "4026531833" - } + "CallName": "sys_enter_execve" + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname + + // callname element type error at the first element + configStr = R"( + { + "Type": "input_ebpf_processprobe_security", + "ProbeConfig": [ + { + "CallName": [ + 1, + "disassociate_ctty", + "acct_process", + "wake_up_new_task" ] } ] @@ -168,28 +223,20 @@ void InputEBPFProcessSecurityUnittest::OnFailedInit() { input.reset(new InputEBPFProcessSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - nami::SecurityProcessFilter thisFilter1 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::PROCESS, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL(0, thisFilter1.mNamespaceFilter.size()); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + 
APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - // invalid param: 1 NamespaceFilter and 1 NamespaceBlackFilter + // callname element type error at the last element configStr = R"( { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceBlackFilter": [ - { - "NamespaceType": "Pid", - "ValueList": ["4026531833"] - } - ], - "NamespaceFilter": [ - { - "NamespaceType": "Pid", - "ValueList": ["4026531833"] - } + "CallName": [ + "disassociate_ctty", + "acct_process", + "wake_up_new_task", + 1 ] } ] @@ -199,54 +246,57 @@ void InputEBPFProcessSecurityUnittest::OnFailedInit() { input.reset(new InputEBPFProcessSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::PROCESS, input->mSecurityOptions.filter_Type); - nami::SecurityProcessFilter thisFilter2 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - APSARA_TEST_EQUAL(1, thisFilter2.mNamespaceFilter.size()); - APSARA_TEST_EQUAL(0, thisFilter2.mNamespaceBlackFilter.size()); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - // // invalid param: 2 NamespaceFilter - // configStr = R"( - // { - // "Type": "input_ebpf_processprobe_security", - // "ProbeConfig": [ - // { -// "NamespaceFilter": [ -// { -// "NamespaceType": "Pid", -// "ValueList": ["4026531833"] -// } -// ], -// "NamespaceFilter": [ -// { -// "NamespaceType": "Pid", -// "ValueList": ["4026531833"] -// } -// ] - // } - // ] - // } - // )"; - // APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - // input.reset(new InputEBPFProcessSecurity()); - // input->SetContext(ctx); - // APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); + // null callname + 
configStr = R"( + { + "Type": "input_ebpf_processprobe_security", + "ProbeConfig": [ + { + "CallNameFilter": [ + ] + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname - // error param level + // invalid callname configStr = R"( { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceType": "Pid", - "ValueList": [ - "4026531833" + "CallNameFilter": [ + "sys_enter_execve_error" ] - }, + } + ] + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputEBPFProcessSecurity()); + input->SetContext(ctx); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); // default callname + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_.size(), 5); // default callname + + // invalid callname of two + configStr = R"( + { + "Type": "input_ebpf_processprobe_security", + "ProbeConfig": [ { - "NamespaceType": "Mnt", - "ValueList": [ - "4026531834" + "CallNameFilter": [ + "sys_enter_execve_error", + "disassociate_ctty", ] } ] @@ -256,11 +306,8 @@ void InputEBPFProcessSecurityUnittest::OnFailedInit() { input.reset(new InputEBPFProcessSecurity()); input->SetContext(ctx); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(input->sName, "input_ebpf_processprobe_security"); - nami::SecurityProcessFilter thisFilter3 = std::get(input->mSecurityOptions.mOptionList[0].filter_); - // APSARA_TEST_EQUAL(ebpf::SecurityFilterType::PROCESS, input->mSecurityOptions.filter_Type); - APSARA_TEST_EQUAL(0, thisFilter3.mNamespaceFilter.size()); - 
APSARA_TEST_EQUAL(0, thisFilter3.mNamespaceBlackFilter.size()); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList.size(), 1); + APSARA_TEST_EQUAL(input->mSecurityOptions.mOptionList[0].call_names_[0], "disassociate_ctty"); } void InputEBPFProcessSecurityUnittest::OnSuccessfulStart() { @@ -268,26 +315,18 @@ void InputEBPFProcessSecurityUnittest::OnSuccessfulStart() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // only NamespaceFilter configStr = R"( { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceFilter": [ - { - "NamespaceType": "Pid", - "ValueList": [ - "4026531833" - ] - }, - { - "NamespaceType": "Mnt", - "ValueList": [ - "4026531834" - ] - } + "CallNameFilter": [ + "sys_enter_execve", + "disassociate_ctty", + "acct_process", + "wake_up_new_task" ] + } ] } @@ -307,25 +346,16 @@ void InputEBPFProcessSecurityUnittest::OnSuccessfulStop() { Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - // only NamespaceFilter configStr = R"( { "Type": "input_ebpf_processprobe_security", "ProbeConfig": [ { - "NamespaceFilter": [ - { - "NamespaceType": "Pid", - "ValueList": [ - "4026531833" - ] - }, - { - "NamespaceType": "Mnt", - "ValueList": [ - "4026531834" - ] - } + "CallNameFilter": [ + "sys_enter_execve", + "disassociate_ctty", + "acct_process", + "wake_up_new_task" ] } ] diff --git a/core/unittest/input/InputFileUnittest.cpp b/core/unittest/input/InputFileUnittest.cpp index 5be2c8fb01..1aa6f2b5d7 100644 --- a/core/unittest/input/InputFileUnittest.cpp +++ b/core/unittest/input/InputFileUnittest.cpp @@ -21,13 +21,13 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "file_server/FileServer.h" -#include "input/InputFile.h" +#include "plugin/input/InputFile.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" -#include "plugin/PluginRegistry.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include 
"processor/inner/ProcessorSplitMultilineLogStringNative.h" -#include "processor/inner/ProcessorTagNative.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "plugin/processor/inner/ProcessorTagNative.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(default_plugin_log_queue_size); @@ -56,6 +56,7 @@ class InputFileUnittest : public testing::Test { void SetUp() override { p.mName = "test_config"; ctx.SetConfigName("test_config"); + p.mPluginID.store(0); ctx.SetPipeline(p); } @@ -68,7 +69,6 @@ void InputFileUnittest::OnSuccessfulInit() { unique_ptr input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - uint32_t pluginIdx = 0; filesystem::path filePath = filesystem::absolute("*.log"); // only mandatory param @@ -174,7 +174,6 @@ void InputFileUnittest::OnSuccessfulInit() { void InputFileUnittest::OnFailedInit() { unique_ptr input; Json::Value configJson, optionalGoPipeline; - uint32_t pluginIdx = 0; input.reset(new InputFile()); input->SetContext(ctx); @@ -186,7 +185,6 @@ void InputFileUnittest::OnEnableContainerDiscovery() { unique_ptr input; Json::Value configJson, optionalGoPipelineJson, optionalGoPipeline; string configStr, optionalGoPipelineStr, errorMsg; - uint32_t pluginIdx = 0; filesystem::path filePath = filesystem::absolute("*.log"); configStr = R"( @@ -207,7 +205,7 @@ void InputFileUnittest::OnEnableContainerDiscovery() { }, "inputs": [ { - "type": "metric_container_info", + "type": "metric_container_info/2", "detail": { "CollectingContainersMeta": true, "FilePattern": "*.log", @@ -224,9 +222,10 @@ void InputFileUnittest::OnEnableContainerDiscovery() { configJson["FilePaths"].append(Json::Value(filePath.string())); optionalGoPipelineJson["global"]["DefaultLogQueueSize"] = Json::Value(INT32_FLAG(default_plugin_log_queue_size)); optionalGoPipelineJson["inputs"][0]["detail"]["LogPath"] = 
Json::Value(filePath.parent_path().string()); + PluginInstance::PluginMeta meta = ctx.GetPipeline().GenNextPluginMeta(false); input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, meta.mPluginID, meta.mNodeID, meta.mChildNodeID); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->mEnableContainerDiscovery); APSARA_TEST_TRUE(input->mFileDiscovery.IsContainerDiscoveryEnabled()); @@ -237,7 +236,6 @@ void InputFileUnittest::TestCreateInnerProcessors() { unique_ptr input; Json::Value configJson, optionalGoPipeline; string configStr, errorMsg; - uint32_t pluginIdx = 0; filesystem::path filePath = filesystem::absolute("*.log"); { // no multiline @@ -357,7 +355,6 @@ void InputFileUnittest::OnPipelineUpdate() { InputFile input; input.SetContext(ctx); string configStr, errorMsg; - uint32_t pluginIdx = 0; filesystem::path filePath = filesystem::absolute("*.log"); configStr = R"( diff --git a/core/unittest/input/InputPrometheusUnittest.cpp b/core/unittest/input/InputPrometheusUnittest.cpp index 8d4749fddc..71f87bebd2 100644 --- a/core/unittest/input/InputPrometheusUnittest.cpp +++ b/core/unittest/input/InputPrometheusUnittest.cpp @@ -21,9 +21,9 @@ #include "PluginRegistry.h" #include "app_config/AppConfig.h" #include "common/JsonUtil.h" -#include "inner/ProcessorPromParseMetricNative.h" -#include "inner/ProcessorPromRelabelMetricNative.h" -#include "input/InputPrometheus.h" +#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" +#include "plugin/processor/inner/ProcessorPromRelabelMetricNative.h" +#include "plugin/input/InputPrometheus.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" #include "prometheus/PrometheusInputRunner.h" @@ -134,6 +134,7 @@ void InputPrometheusUnittest::OnSuccessfulInit() { APSARA_TEST_EQUAL(10 * 1024 * 1024, input->mTargetSubscirber->mScrapeConfigPtr->mMaxScrapeSizeBytes); 
APSARA_TEST_EQUAL(1000000, input->mTargetSubscirber->mScrapeConfigPtr->mSampleLimit); APSARA_TEST_EQUAL(1000000, input->mTargetSubscirber->mScrapeConfigPtr->mSeriesLimit); + PrometheusInputRunner::GetInstance()->Stop(); } void InputPrometheusUnittest::OnFailedInit() { @@ -177,6 +178,7 @@ void InputPrometheusUnittest::OnFailedInit() { input->SetContext(ctx); input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); + PrometheusInputRunner::GetInstance()->Stop(); } void InputPrometheusUnittest::OnPipelineUpdate() { @@ -216,6 +218,7 @@ void InputPrometheusUnittest::OnPipelineUpdate() { APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("_arms-prom/node-exporter/0") == PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); + PrometheusInputRunner::GetInstance()->Stop(); } void InputPrometheusUnittest::TestCreateInnerProcessor() { @@ -377,6 +380,7 @@ void InputPrometheusUnittest::TestCreateInnerProcessor() { ->mRelabelConfigs[2] .mAction); } + PrometheusInputRunner::GetInstance()->Stop(); } UNIT_TEST_CASE(InputPrometheusUnittest, OnSuccessfulInit) diff --git a/core/unittest/log_pb/PBUnittest.cpp b/core/unittest/log_pb/PBUnittest.cpp index 815c4b37eb..0e60ffcb96 100644 --- a/core/unittest/log_pb/PBUnittest.cpp +++ b/core/unittest/log_pb/PBUnittest.cpp @@ -14,8 +14,8 @@ #include #include "unittest/Unittest.h" -#include "log_pb/RawLogGroup.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/RawLogGroup.h" +#include "protobuf/sls/sls_logs.pb.h" using namespace std; using namespace sls_logs; diff --git a/core/unittest/models/PipelineEventGroupUnittest.cpp b/core/unittest/models/PipelineEventGroupUnittest.cpp index fbd98a60ca..b0ffd53a45 100644 --- a/core/unittest/models/PipelineEventGroupUnittest.cpp +++ b/core/unittest/models/PipelineEventGroupUnittest.cpp @@ -77,12 +77,11 @@ void PipelineEventGroupUnittest::TestSetMetadata() { } 
size_t afterAlloc = mSourceBuffer->mAllocator.TotalAllocated(); APSARA_TEST_EQUAL_FATAL(beforeAlloc, afterAlloc); - std::vector> answers = { - {EventGroupMetaKey::LOG_FILE_PATH, "value1"}, - {EventGroupMetaKey::LOG_FILE_PATH_RESOLVED, "value2"}, - {EventGroupMetaKey::LOG_FILE_INODE, "value3"}, - {EventGroupMetaKey::SOURCE_ID, "value4"} - }; + std::vector> answers + = {{EventGroupMetaKey::LOG_FILE_PATH, "value1"}, + {EventGroupMetaKey::LOG_FILE_PATH_RESOLVED, "value2"}, + {EventGroupMetaKey::LOG_FILE_INODE, "value3"}, + {EventGroupMetaKey::SOURCE_ID, "value4"}}; for (const auto kv : answers) { APSARA_TEST_TRUE_FATAL(mEventGroup->HasMetadata(kv.first)); APSARA_TEST_STREQ_FATAL(kv.second.c_str(), mEventGroup->GetMetadata(kv.first).data()); diff --git a/core/unittest/pipeline/CMakeLists.txt b/core/unittest/pipeline/CMakeLists.txt index 1a6a61cb3d..199620da42 100644 --- a/core/unittest/pipeline/CMakeLists.txt +++ b/core/unittest/pipeline/CMakeLists.txt @@ -24,11 +24,11 @@ target_link_libraries(pipeline_unittest ${UT_BASE_TARGET}) add_executable(pipeline_manager_unittest PipelineManagerUnittest.cpp) target_link_libraries(pipeline_manager_unittest ${UT_BASE_TARGET}) -add_executable(process_config_manager_unittest ProcessConfigManagerUnittest.cpp) -target_link_libraries(process_config_manager_unittest ${UT_BASE_TARGET}) +add_executable(instance_config_manager_unittest InstanceConfigManagerUnittest.cpp) +target_link_libraries(instance_config_manager_unittest ${UT_BASE_TARGET}) include(GoogleTest) gtest_discover_tests(global_config_unittest) gtest_discover_tests(pipeline_unittest) gtest_discover_tests(pipeline_manager_unittest) -gtest_discover_tests(process_config_manager_unittest) +gtest_discover_tests(instance_config_manager_unittest) diff --git a/core/unittest/pipeline/ProcessConfigManagerUnittest.cpp b/core/unittest/pipeline/InstanceConfigManagerUnittest.cpp similarity index 54% rename from core/unittest/pipeline/ProcessConfigManagerUnittest.cpp rename to 
core/unittest/pipeline/InstanceConfigManagerUnittest.cpp index 73a2a879ff..85ba8aaa6b 100644 --- a/core/unittest/pipeline/ProcessConfigManagerUnittest.cpp +++ b/core/unittest/pipeline/InstanceConfigManagerUnittest.cpp @@ -14,8 +14,8 @@ #include "app_config/AppConfig.h" #include "common/JsonUtil.h" -#include "config/ProcessConfig.h" -#include "pipeline/ProcessConfigManager.h" +#include "config/InstanceConfig.h" +#include "pipeline/InstanceConfigManager.h" #include "unittest/Unittest.h" using namespace std; @@ -25,63 +25,63 @@ DECLARE_FLAG_BOOL(enable_flow_control); namespace logtail { -class ProcessConfigManagerUnittest : public testing::Test { +class InstanceConfigManagerUnittest : public testing::Test { public: - void TestUpdateProcessConfigs(); + void TestUpdateInstanceConfigs(); }; -void ProcessConfigManagerUnittest::TestUpdateProcessConfigs() { +void InstanceConfigManagerUnittest::TestUpdateInstanceConfigs() { AppConfig::GetInstance(); // Added { - ProcessConfigDiff configDiff; + InstanceConfigDiff configDiff; std::string content = R"({"enable":true,"max_bytes_per_sec":1234,"mem_usage_limit":456,"cpu_usage_limit":2,"bool":false,"int":-1,"int64":-1000000,"uint":10000,"uint64":100000000000,"double":123123.1,"string":"string","array":[1,2,3],"object":{"a":1}})"; std::string errorMsg; unique_ptr detail = unique_ptr(new Json::Value()); APSARA_TEST_TRUE(ParseJsonTable(content, *detail, errorMsg)); APSARA_TEST_TRUE(errorMsg.empty()); - ProcessConfig config("test1", std::move(detail)); + InstanceConfig config("test1", std::move(detail)); configDiff.mAdded.emplace_back(config); - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(configDiff); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(configDiff); - APSARA_TEST_EQUAL(1U, ProcessConfigManager::GetInstance()->GetAllConfigNames().size()); - APSARA_TEST_NOT_EQUAL(nullptr, ProcessConfigManager::GetInstance()->FindConfigByName("test1")); - APSARA_TEST_EQUAL(nullptr, 
ProcessConfigManager::GetInstance()->FindConfigByName("test3")); + APSARA_TEST_EQUAL(1U, InstanceConfigManager::GetInstance()->GetAllConfigNames().size()); + APSARA_TEST_NOT_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test1")); + APSARA_TEST_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test3")); } // Modified { - ProcessConfigDiff configDiff; + InstanceConfigDiff configDiff; std::string content = R"({"enable": true,"max_bytes_per_sec": 209715200, "mem_usage_limit":123, "cpu_usage_limit":4,"bool":false,"int":-1,"int64":-1000000,"uint":10000,"uint64":100000000000,"double":123123.1,"string":"string","array":[1,2,3],"object":{"a":1}})"; std::string errorMsg; unique_ptr detail = unique_ptr(new Json::Value()); APSARA_TEST_TRUE(ParseJsonTable(content, *detail, errorMsg)); APSARA_TEST_TRUE(errorMsg.empty()); - ProcessConfig config("test1", std::move(detail)); + InstanceConfig config("test1", std::move(detail)); configDiff.mModified.emplace_back(config); - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(configDiff); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(configDiff); - APSARA_TEST_EQUAL(1U, ProcessConfigManager::GetInstance()->GetAllConfigNames().size()); - APSARA_TEST_NOT_EQUAL(nullptr, ProcessConfigManager::GetInstance()->FindConfigByName("test1")); - APSARA_TEST_EQUAL(nullptr, ProcessConfigManager::GetInstance()->FindConfigByName("test3")); - APSARA_TEST_NOT_EQUAL(nullptr, ProcessConfigManager::GetInstance()->FindConfigByName("test1")); + APSARA_TEST_EQUAL(1U, InstanceConfigManager::GetInstance()->GetAllConfigNames().size()); + APSARA_TEST_NOT_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test1")); + APSARA_TEST_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test3")); + APSARA_TEST_NOT_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test1")); } // mRemoved { - ProcessConfigDiff configDiff; + InstanceConfigDiff 
configDiff; configDiff.mRemoved.emplace_back("test1"); - ProcessConfigManager::GetInstance()->UpdateProcessConfigs(configDiff); + InstanceConfigManager::GetInstance()->UpdateInstanceConfigs(configDiff); - APSARA_TEST_EQUAL(0U, ProcessConfigManager::GetInstance()->GetAllConfigNames().size()); - APSARA_TEST_EQUAL(nullptr, ProcessConfigManager::GetInstance()->FindConfigByName("test1")); - APSARA_TEST_EQUAL(nullptr, ProcessConfigManager::GetInstance()->FindConfigByName("test3")); + APSARA_TEST_EQUAL(0U, InstanceConfigManager::GetInstance()->GetAllConfigNames().size()); + APSARA_TEST_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test1")); + APSARA_TEST_EQUAL(nullptr, InstanceConfigManager::GetInstance()->FindConfigByName("test3")); } } -UNIT_TEST_CASE(ProcessConfigManagerUnittest, TestUpdateProcessConfigs) +UNIT_TEST_CASE(InstanceConfigManagerUnittest, TestUpdateInstanceConfigs) } // namespace logtail diff --git a/core/unittest/pipeline/PipelineUnittest.cpp b/core/unittest/pipeline/PipelineUnittest.cpp index e290d391eb..98b41a5ff6 100644 --- a/core/unittest/pipeline/PipelineUnittest.cpp +++ b/core/unittest/pipeline/PipelineUnittest.cpp @@ -18,17 +18,17 @@ #include #include "app_config/AppConfig.h" -#include "batch/TimeoutFlushManager.h" +#include "pipeline/batch/TimeoutFlushManager.h" #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "input/InputFeedbackInterfaceRegistry.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" #include "pipeline/Pipeline.h" -#include "plugin/PluginRegistry.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" -#include "queue/BoundedProcessQueue.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" 
+#include "pipeline/queue/BoundedProcessQueue.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" @@ -188,7 +188,7 @@ void PipelineUnittest::OnSuccessfulInit() const { }, "inputs": [ { - "type": "metric_container_info", + "type": "metric_container_info/2", "detail": { "CollectingContainersMeta": true, "LogPath": "/home", @@ -199,7 +199,7 @@ void PipelineUnittest::OnSuccessfulInit() const { ], "extensions": [ { - "type": "ext_basicauth", + "type": "ext_basicauth/7", "detail": {} } ] @@ -213,15 +213,21 @@ void PipelineUnittest::OnSuccessfulInit() const { "DefaultLogQueueSize" : 10, "DefaultLogGroupQueueSize": 3 }, + "aggregators": [ + { + "type": "aggregator_default/5", + "detail": {} + } + ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/6", "detail": {} } ], "extensions": [ { - "type": "ext_basicauth", + "type": "ext_basicauth/7", "detail": {} } ] @@ -576,11 +582,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -602,19 +603,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/4", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/5", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/6", "detail": { "EnableShardHash": false } @@ -633,7 +634,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), 
pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 5: extended -> extended -> native @@ -649,11 +650,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -674,25 +670,25 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "inputs": [ { - "type": "service_docker_stdout", + "type": "service_docker_stdout/1", "detail": {} } ], "processors": [ { - "type": "processor_regex", + "type": "processor_regex/2", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/3", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/4", "detail": { "EnableShardHash": false } @@ -710,7 +706,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mInputs.size()); APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); - APSARA_TEST_TRUE(goPipelineWithInput == pipeline->mGoPipelineWithInput); + APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithoutInput.isNull()); goPipelineWithInput.clear(); @@ -733,11 +729,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -777,11 +768,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -803,19 +789,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/5", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": 
"aggregator_default/6", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/7", "detail": { "EnableShardHash": false } @@ -834,7 +820,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(1U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 8: extended -> (native -> extended) -> native @@ -856,11 +842,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -903,11 +884,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -967,11 +943,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "service_docker_stdout" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -992,19 +963,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "inputs": [ { - "type": "service_docker_stdout", + "type": "service_docker_stdout/1", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/2", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/3", "detail": { "EnableShardHash": false } @@ -1022,7 +993,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mInputs.size()); APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); - APSARA_TEST_TRUE(goPipelineWithInput == 
pipeline->mGoPipelineWithInput); + APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithoutInput.isNull()); goPipelineWithInput.clear(); @@ -1040,11 +1011,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "service_docker_stdout" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -1081,11 +1047,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Keys": ["key"] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1102,13 +1063,13 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/5", "detail": {} } ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/6", "detail": {} } ] @@ -1125,7 +1086,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(1U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(0U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 14: extended -> native -> extended @@ -1144,11 +1105,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Keys": ["key"] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1183,11 +1139,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Keys": ["key"] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1216,11 +1167,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - 
"Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1237,19 +1183,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/4", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/5", "detail": {} } ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/6", "detail": {} } ] @@ -1266,7 +1212,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(0U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 17: extended -> extended -> extended @@ -1282,11 +1228,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1302,25 +1243,25 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "inputs": [ { - "type": "service_docker_stdout", + "type": "service_docker_stdout/1", "detail": {} } ], "processors": [ { - "type": "processor_regex", + "type": "processor_regex/2", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/3", "detail": {} } ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/4", "detail": {} } ] @@ -1336,7 +1277,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mInputs.size()); APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(0U, pipeline->GetFlushers().size()); - APSARA_TEST_TRUE(goPipelineWithInput == pipeline->mGoPipelineWithInput); + 
APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithoutInput.isNull()); goPipelineWithInput.clear(); @@ -1359,11 +1300,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1398,11 +1334,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1419,19 +1350,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/5", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/6", "detail": {} } ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/7", "detail": {} } ] @@ -1448,7 +1379,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(1U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(0U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 20: extended -> (native -> extended) -> extended @@ -1470,11 +1401,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1512,11 +1438,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1540,11 +1461,6 @@ void 
PipelineUnittest::OnInitVariousTopology() const { ] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1561,13 +1477,13 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/4", "detail": {} } ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/5", "detail": {} } ] @@ -1584,7 +1500,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(0U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 23: extended -> none -> extended @@ -1595,11 +1511,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "service_docker_stdout" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1615,19 +1526,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "inputs": [ { - "type": "service_docker_stdout", + "type": "service_docker_stdout/1", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/2", "detail": {} } ], "flushers": [ { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/3", "detail": {} } ] @@ -1643,7 +1554,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mInputs.size()); APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(0U, pipeline->GetFlushers().size()); - APSARA_TEST_TRUE(goPipelineWithInput == pipeline->mGoPipelineWithInput); + APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); 
APSARA_TEST_TRUE(pipeline->mGoPipelineWithoutInput.isNull()); goPipelineWithInput.clear(); @@ -1661,11 +1572,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "service_docker_stdout" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_kafka_v2" @@ -1697,11 +1603,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Keys": ["key"] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -1726,19 +1627,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/5", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/6", "detail": { "EnableShardHash": false } }, { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/7", "detail": {} } ] @@ -1755,7 +1656,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(1U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 26: extended -> native -> (native, extended) @@ -1774,11 +1675,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Keys": ["key"] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -1821,11 +1717,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Keys": ["key"] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -1862,11 +1753,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], 
"flushers": [ { "Type": "flusher_sls", @@ -1891,25 +1777,25 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/4", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/5", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/6", "detail": { "EnableShardHash": false } }, { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/7", "detail": {} } ] @@ -1926,7 +1812,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 29: extended -> extended -> (native, extended) @@ -1942,11 +1828,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -1970,31 +1851,31 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "inputs": [ { - "type": "service_docker_stdout", + "type": "service_docker_stdout/1", "detail": {} } ], "processors": [ { - "type": "processor_regex", + "type": "processor_regex/2", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/3", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/4", "detail": { "EnableShardHash": false } }, { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/5", "detail": {} } ] @@ -2010,7 +1891,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mInputs.size()); APSARA_TEST_EQUAL(0U, 
pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); - APSARA_TEST_TRUE(goPipelineWithInput == pipeline->mGoPipelineWithInput); + APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithoutInput.isNull()); goPipelineWithInput.clear(); @@ -2033,11 +1914,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -2080,11 +1956,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -2109,25 +1980,25 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/5", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/6", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/7", "detail": { "EnableShardHash": false } }, { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/8", "detail": {} } ] @@ -2144,7 +2015,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(1U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 32: extended -> (native -> extended) -> (native, extended) @@ -2166,11 +2037,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": 
"flusher_sls", @@ -2216,11 +2082,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "processor_regex" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -2252,11 +2113,6 @@ void PipelineUnittest::OnInitVariousTopology() const { ] } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -2281,19 +2137,19 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/4", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/5", "detail": { "EnableShardHash": false } }, { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/6", "detail": {} } ] @@ -2310,7 +2166,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithInput.isNull()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithoutInput.clear(); // topology 35: extended -> none -> (native, extended) @@ -2321,11 +2177,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "service_docker_stdout" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -2349,25 +2200,25 @@ void PipelineUnittest::OnInitVariousTopology() const { }, "inputs": [ { - "type": "service_docker_stdout", + "type": "service_docker_stdout/1", "detail": {} } ], "aggregators": [ { - "type": "aggregator_context", + "type": "aggregator_default/2", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/3", "detail": { "EnableShardHash": false } }, { - "type": "flusher_kafka_v2", + "type": "flusher_kafka_v2/4", 
"detail": {} } ] @@ -2383,7 +2234,7 @@ void PipelineUnittest::OnInitVariousTopology() const { APSARA_TEST_EQUAL(0U, pipeline->mInputs.size()); APSARA_TEST_EQUAL(0U, pipeline->mProcessorLine.size()); APSARA_TEST_EQUAL(1U, pipeline->GetFlushers().size()); - APSARA_TEST_TRUE(goPipelineWithInput == pipeline->mGoPipelineWithInput); + APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); APSARA_TEST_TRUE(pipeline->mGoPipelineWithoutInput.isNull()); goPipelineWithInput.clear(); @@ -2401,11 +2252,6 @@ void PipelineUnittest::OnInitVariousTopology() const { "Type": "service_docker_stdout" } ], - "aggregators": [ - { - "Type": "aggregator_context" - } - ], "flushers": [ { "Type": "flusher_sls", @@ -2707,7 +2553,7 @@ void PipelineUnittest::OnInputFileWithContainerDiscovery() const { }, "inputs": [ { - "type": "metric_container_info", + "type": "metric_container_info/2", "detail": { "CollectingContainersMeta": true, "LogPath": "/home", @@ -2769,7 +2615,7 @@ void PipelineUnittest::OnInputFileWithContainerDiscovery() const { }, "inputs": [ { - "type": "metric_container_info", + "type": "metric_container_info/2", "detail": { "CollectingContainersMeta": true, "LogPath": "/home", @@ -2789,13 +2635,19 @@ void PipelineUnittest::OnInputFileWithContainerDiscovery() const { }, "processors": [ { - "type": "processor_regex", + "type": "processor_regex/5", + "detail": {} + } + ], + "aggregators": [ + { + "type": "aggregator_default/6", "detail": {} } ], "flushers": [ { - "type": "flusher_sls", + "type": "flusher_sls/7", "detail": { "EnableShardHash": false } @@ -2812,7 +2664,7 @@ void PipelineUnittest::OnInputFileWithContainerDiscovery() const { pipeline.reset(new Pipeline()); APSARA_TEST_TRUE(pipeline->Init(std::move(*config))); APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); - APSARA_TEST_TRUE(goPipelineWithoutInput == pipeline->mGoPipelineWithoutInput); + 
APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); goPipelineWithInput.clear(); goPipelineWithoutInput.clear(); } diff --git a/core/unittest/plugin/FlusherInstanceUnittest.cpp b/core/unittest/plugin/FlusherInstanceUnittest.cpp index 0a5a1b0e14..533a43dfd5 100644 --- a/core/unittest/plugin/FlusherInstanceUnittest.cpp +++ b/core/unittest/plugin/FlusherInstanceUnittest.cpp @@ -14,8 +14,8 @@ #include -#include "plugin/instance/FlusherInstance.h" -#include "queue/QueueKeyManager.h" +#include "pipeline/plugin/instance/FlusherInstance.h" +#include "pipeline/queue/QueueKeyManager.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/plugin/InputInstanceUnittest.cpp b/core/unittest/plugin/InputInstanceUnittest.cpp index fae7840dd5..3736bd01ba 100644 --- a/core/unittest/plugin/InputInstanceUnittest.cpp +++ b/core/unittest/plugin/InputInstanceUnittest.cpp @@ -14,8 +14,8 @@ #include -#include "plugin/creator/StaticProcessorCreator.h" -#include "plugin/instance/InputInstance.h" +#include "pipeline/plugin/creator/StaticProcessorCreator.h" +#include "pipeline/plugin/instance/InputInstance.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/plugin/PluginMock.h b/core/unittest/plugin/PluginMock.h index 82b0c5fd29..a2c5ccfbc5 100644 --- a/core/unittest/plugin/PluginMock.h +++ b/core/unittest/plugin/PluginMock.h @@ -19,15 +19,15 @@ #include #include "pipeline/Pipeline.h" -#include "plugin/PluginRegistry.h" -#include "plugin/creator/StaticFlusherCreator.h" -#include "plugin/creator/StaticInputCreator.h" -#include "plugin/creator/StaticProcessorCreator.h" -#include "plugin/interface/Flusher.h" -#include "plugin/interface/HttpFlusher.h" -#include "plugin/interface/Input.h" -#include "plugin/interface/Processor.h" -#include "queue/SenderQueueManager.h" +#include "pipeline/plugin/PluginRegistry.h" +#include 
"pipeline/plugin/creator/StaticFlusherCreator.h" +#include "pipeline/plugin/creator/StaticInputCreator.h" +#include "pipeline/plugin/creator/StaticProcessorCreator.h" +#include "pipeline/plugin/interface/Flusher.h" +#include "pipeline/plugin/interface/HttpFlusher.h" +#include "pipeline/plugin/interface/Input.h" +#include "pipeline/plugin/interface/Processor.h" +#include "pipeline/queue/SenderQueueManager.h" namespace logtail { diff --git a/core/unittest/plugin/PluginRegistryUnittest.cpp b/core/unittest/plugin/PluginRegistryUnittest.cpp index 0bc0cd39e6..d777d71f5e 100644 --- a/core/unittest/plugin/PluginRegistryUnittest.cpp +++ b/core/unittest/plugin/PluginRegistryUnittest.cpp @@ -14,11 +14,11 @@ #include -#include "plugin/PluginRegistry.h" -#include "plugin/creator/StaticFlusherCreator.h" -#include "plugin/creator/StaticInputCreator.h" -#include "plugin/creator/StaticProcessorCreator.h" -#include "sender/FlusherRunner.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "pipeline/plugin/creator/StaticFlusherCreator.h" +#include "pipeline/plugin/creator/StaticInputCreator.h" +#include "pipeline/plugin/creator/StaticProcessorCreator.h" +#include "runner/FlusherRunner.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/plugin/ProcessorInstanceUnittest.cpp b/core/unittest/plugin/ProcessorInstanceUnittest.cpp index 543ba7bf1f..b545602a3f 100644 --- a/core/unittest/plugin/ProcessorInstanceUnittest.cpp +++ b/core/unittest/plugin/ProcessorInstanceUnittest.cpp @@ -14,7 +14,7 @@ #include -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp b/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp index 6c04f27a85..99031a2897 100644 --- a/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp +++ 
b/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "plugin/creator/StaticFlusherCreator.h" -#include "plugin/instance/PluginInstance.h" +#include "pipeline/plugin/creator/StaticFlusherCreator.h" +#include "pipeline/plugin/instance/PluginInstance.h" #include "unittest/plugin/PluginMock.h" #include "unittest/Unittest.h" diff --git a/core/unittest/plugin/StaticInputCreatorUnittest.cpp b/core/unittest/plugin/StaticInputCreatorUnittest.cpp index cfa69f127c..d8f810bee4 100644 --- a/core/unittest/plugin/StaticInputCreatorUnittest.cpp +++ b/core/unittest/plugin/StaticInputCreatorUnittest.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "plugin/creator/StaticInputCreator.h" -#include "plugin/instance/PluginInstance.h" +#include "pipeline/plugin/creator/StaticInputCreator.h" +#include "pipeline/plugin/instance/PluginInstance.h" #include "unittest/plugin/PluginMock.h" #include "unittest/Unittest.h" diff --git a/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp b/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp index a3c91cb98e..a27691ebcd 100644 --- a/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp +++ b/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "plugin/creator/StaticProcessorCreator.h" -#include "plugin/instance/PluginInstance.h" +#include "pipeline/plugin/creator/StaticProcessorCreator.h" +#include "pipeline/plugin/instance/PluginInstance.h" #include "unittest/plugin/PluginMock.h" #include "unittest/Unittest.h" diff --git a/core/unittest/polling/PollingUnittest.cpp b/core/unittest/polling/PollingUnittest.cpp index 33ea7e7a23..3efdc1700b 100644 --- a/core/unittest/polling/PollingUnittest.cpp +++ b/core/unittest/polling/PollingUnittest.cpp @@ -15,15 +15,15 @@ #include "unittest/Unittest.h" #include "common/Flags.h" #include -#include "controller/EventDispatcher.h" -#include "config_manager/ConfigManager.h" +#include "file_server/EventDispatcher.h" +#include "file_server/ConfigManager.h" #include "app_config/AppConfig.h" -#include "reader/LogFileReader.h" -#include "event_handler/EventHandler.h" +#include "file_server/reader/LogFileReader.h" +#include "file_server/event_handler/EventHandler.h" #include "monitor/Monitor.h" #include "common/StringTools.h" #include "logger/Logger.h" -#include "event/Event.h" +#include "file_server/event/Event.h" #if defined(__linux__) #include #include @@ -43,18 +43,18 @@ #include #include #include -#include "log_pb/metric.pb.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/metric.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "monitor/LogtailAlarm.h" -#include "event_handler/LogInput.h" +#include "file_server/event_handler/LogInput.h" #include "common/FileEncryption.h" #include "common/FileSystemUtil.h" #include #include #include -#include "polling/PollingDirFile.h" -#include "polling/PollingModify.h" -#include "polling/PollingEventQueue.h" +#include "file_server/polling/PollingDirFile.h" +#include "file_server/polling/PollingModify.h" +#include "file_server/polling/PollingEventQueue.h" using namespace std; using namespace sls_logs; diff --git a/core/unittest/processor/ParseContainerLogBenchmark.cpp 
b/core/unittest/processor/ParseContainerLogBenchmark.cpp index b89d73e142..2cdf22358b 100644 --- a/core/unittest/processor/ParseContainerLogBenchmark.cpp +++ b/core/unittest/processor/ParseContainerLogBenchmark.cpp @@ -19,8 +19,8 @@ #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/inner/ProcessorParseContainerLogNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" #include "unittest/Unittest.h" diff --git a/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp b/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp index 712ae2f4f2..816fb57ad9 100644 --- a/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp @@ -13,11 +13,11 @@ // limitations under the License. #include "common/JsonUtil.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/ProcessorDesensitizeNative.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/ProcessorDesensitizeNative.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorFilterNativeUnittest.cpp b/core/unittest/processor/ProcessorFilterNativeUnittest.cpp index 7e499cb08b..2f02dd45d4 100644 --- a/core/unittest/processor/ProcessorFilterNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorFilterNativeUnittest.cpp @@ -13,8 +13,8 @@ // limitations under 
the License. #include "common/ExceptionBase.h" #include "common/JsonUtil.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/ProcessorFilterNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/ProcessorFilterNative.h" #include "unittest/Unittest.h" using boost::regex; diff --git a/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp b/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp index 24fac5cd97..f5b0045ec7 100644 --- a/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp @@ -17,8 +17,8 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp b/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp index fc77e1c827..5b0678b7fe 100644 --- a/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp @@ -18,11 +18,11 @@ #include "config/PipelineConfig.h" #include "models/LogEvent.h" #include "models/StringView.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/ProcessorParseApsaraNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/ProcessorParseApsaraNative.h" 
+#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp b/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp index 328f94439b..7db7651b05 100644 --- a/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp @@ -25,9 +25,9 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/inner/ProcessorParseContainerLogNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorParseContainerLogNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp b/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp index 49ae17fe81..f4be52bdf6 100644 --- a/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp @@ -17,11 +17,11 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/ProcessorParseDelimiterNative.h" -#include "processor/inner/ProcessorMergeMultilineLogNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/ProcessorParseDelimiterNative.h" +#include 
"plugin/processor/inner/ProcessorMergeMultilineLogNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp b/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp index d324759a25..7493da0843 100644 --- a/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp @@ -16,9 +16,9 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/ProcessorParseJsonNative.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/ProcessorParseJsonNative.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp b/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp index 4f61e176a3..c38c6298d2 100644 --- a/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp @@ -17,8 +17,8 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/ProcessorParseRegexNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/ProcessorParseRegexNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp b/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp index db8703836b..7af6786275 100644 --- a/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp 
+++ b/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp @@ -19,8 +19,8 @@ #include "common/JsonUtil.h" #include "common/TimeUtil.h" #include "config/PipelineConfig.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/ProcessorParseTimestampNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/ProcessorParseTimestampNative.h" #include "unittest/Unittest.h" using namespace logtail; diff --git a/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp b/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp index ca4eeaa69d..fcdbe4e6bd 100644 --- a/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp @@ -18,7 +18,8 @@ #include "MetricEvent.h" #include "StringTools.h" #include "common/JsonUtil.h" -#include "processor/inner/ProcessorPromParseMetricNative.h" +#include "models/PipelineEventGroup.h" +#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" #include "prometheus/Constants.h" #include "prometheus/labels/TextParser.h" #include "prometheus/schedulers/ScrapeScheduler.h" @@ -74,7 +75,7 @@ void ProcessorParsePrometheusMetricUnittest::TestProcess() { // make events auto parser = TextParser(); - auto splitByLines = [](const std::string& content, time_t timestamp) { + auto splitByLines = [](const std::string& content) { PipelineEventGroup eGroup(std::make_shared()); for (const auto& line : SplitString(content, "\r\n")) { @@ -84,7 +85,6 @@ void ProcessorParsePrometheusMetricUnittest::TestProcess() { } auto* MetricEvent = eGroup.AddLogEvent(); MetricEvent->SetContent(prometheus::PROMETHEUS, newLine); - MetricEvent->SetTimestamp(timestamp); } return eGroup; @@ -102,8 +102,10 @@ void ProcessorParsePrometheusMetricUnittest::TestProcess() { test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 # end - )""", - 0); + )"""); + // set timestamp in milliseconds + auto 
timestampMilliSec = GetCurrentTimeInMilliSeconds(); + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, ToString(timestampMilliSec)); // run function APSARA_TEST_EQUAL((size_t)8, eventGroup.GetEvents().size()); @@ -119,6 +121,10 @@ void ProcessorParsePrometheusMetricUnittest::TestProcess() { APSARA_TEST_EQUAL("test_metric6", eventGroup.GetEvents().at(5).Cast().GetName()); APSARA_TEST_EQUAL("test_metric7", eventGroup.GetEvents().at(6).Cast().GetName()); APSARA_TEST_EQUAL("test_metric8", eventGroup.GetEvents().at(7).Cast().GetName()); + + // verify timestamp + APSARA_TEST_EQUAL(time_t(timestampMilliSec / 1000), + eventGroup.GetEvents().at(0).Cast().GetTimestamp()); } UNIT_TEST_CASE(ProcessorParsePrometheusMetricUnittest, TestInit) diff --git a/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp b/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp index 10cd8536ff..7eb982a98c 100644 --- a/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp @@ -14,10 +14,12 @@ * limitations under the License. 
*/ -#include "MetricEvent.h" #include "TextParser.h" #include "common/JsonUtil.h" -#include "processor/inner/ProcessorPromRelabelMetricNative.h" +#include "common/StringTools.h" +#include "models/MetricEvent.h" +#include "plugin/processor/inner/ProcessorPromRelabelMetricNative.h" +#include "prometheus/Constants.h" #include "unittest/Unittest.h" using namespace std; @@ -29,6 +31,7 @@ class ProcessorPromRelabelMetricNativeUnittest : public testing::Test { void TestInit(); void TestProcess(); + void TestAddAutoMetrics(); PipelineContext mContext; }; @@ -39,7 +42,8 @@ void ProcessorPromRelabelMetricNativeUnittest::TestInit() { processor.SetContext(mContext); // success config - string configStr, errorMsg; + string configStr; + string errorMsg; configStr = R"JSON( { "metric_relabel_configs": [ @@ -77,7 +81,8 @@ void ProcessorPromRelabelMetricNativeUnittest::TestProcess() { ProcessorPromRelabelMetricNative processor; processor.SetContext(mContext); - string configStr, errorMsg; + string configStr; + string errorMsg; configStr = configStr + R"( { "metric_relabel_configs": [ @@ -112,7 +117,7 @@ void ProcessorPromRelabelMetricNativeUnittest::TestProcess() { // make events auto parser = TextParser(); - auto eventGroup = parser.Parse(R"""( + string rawData = R"""( # begin test_metric1{k1="v1", k2="v2"} 1.0 @@ -125,7 +130,8 @@ test_metric7{k1="v1",k3="2", } 9.9410452992e+10 1715829785083 test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 # end - )"""); + )"""; + auto eventGroup = parser.Parse(rawData, 0, 0); // run function std::string pluginId = "testID"; @@ -144,8 +150,76 @@ test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 // test_metric8 is dropped by relabel config } +void ProcessorPromRelabelMetricNativeUnittest::TestAddAutoMetrics() { + // make config + Json::Value config; + + ProcessorPromRelabelMetricNative processor; + processor.SetContext(mContext); + + string configStr; + string errorMsg; + configStr = configStr + R"( + { + "job_name": 
"test_job", + "scrape_timeout": "15s", + "sample_limit": 1000, + "series_limit": 1000 + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + + // init + APSARA_TEST_TRUE(processor.Init(config)); + + // make events + auto parser = TextParser(); + auto eventGroup = parser.Parse(R"""( +# begin +test_metric1{k1="v1", k2="v2"} 1.0 + test_metric2{k1="v1", k2="v2"} 2.0 1234567890 +test_metric3{k1="v1",k2="v2"} 9.9410452992e+10 + test_metric4{k1="v1",k2="v2"} 9.9410452992e+10 1715829785083 + test_metric5{k1="v1", k2="v2" } 9.9410452992e+10 1715829785083 +test_metric6{k1="v1",k2="v2",} 9.9410452992e+10 1715829785083 +test_metric7{k1="v1",k3="2", } 9.9410452992e+10 1715829785083 +test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 +# end + )""", + 0, + 0); + + APSARA_TEST_EQUAL((size_t)8, eventGroup.GetEvents().size()); + + // without metadata + processor.AddAutoMetrics(eventGroup); + APSARA_TEST_EQUAL((size_t)8, eventGroup.GetEvents().size()); + + // with metadata + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, ToString(1715829785083)); + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED, ToString(8)); + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION, ToString(1.5)); + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE, ToString(2325)); + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE, ToString(true)); + eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE, string("localhost:8080")); + processor.AddAutoMetrics(eventGroup); + + APSARA_TEST_EQUAL((size_t)15, eventGroup.GetEvents().size()); + APSARA_TEST_EQUAL(1.5, eventGroup.GetEvents().at(8).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL(2325, eventGroup.GetEvents().at(9).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL(1000, eventGroup.GetEvents().at(10).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL(8, eventGroup.GetEvents().at(11).Cast().GetValue()->mValue); + 
APSARA_TEST_EQUAL(8, eventGroup.GetEvents().at(12).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL(15, eventGroup.GetEvents().at(13).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL(1, eventGroup.GetEvents().at(14).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL("localhost:8080", eventGroup.GetEvents().at(14).Cast().GetTag("instance")); + APSARA_TEST_EQUAL("test_job", eventGroup.GetEvents().at(14).Cast().GetTag("job")); +} + UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestInit) UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestProcess) +UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestAddAutoMetrics) + } // namespace logtail diff --git a/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp b/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp index 2f9d3f56f3..aa30e8e87f 100644 --- a/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp @@ -18,8 +18,8 @@ #include "common/Constants.h" #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "plugin/instance/ProcessorInstance.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" +#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp b/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp index 9b46e78b85..61ceeddc30 100644 --- a/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp @@ -17,8 +17,8 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" #include "models/LogEvent.h" -#include "processor/inner/ProcessorSplitLogStringNative.h" -#include "processor/inner/ProcessorSplitMultilineLogStringNative.h" 
+#include "plugin/processor/inner/ProcessorSplitLogStringNative.h" +#include "plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/processor/ProcessorTagNativeUnittest.cpp b/core/unittest/processor/ProcessorTagNativeUnittest.cpp index 6798ca8507..7855259280 100644 --- a/core/unittest/processor/ProcessorTagNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorTagNativeUnittest.cpp @@ -16,9 +16,9 @@ #include "common/Constants.h" #include "config/PipelineConfig.h" -#include "config_manager/ConfigManager.h" +#include "file_server/ConfigManager.h" #include "pipeline/Pipeline.h" -#include "processor/inner/ProcessorTagNative.h" +#include "plugin/processor/inner/ProcessorTagNative.h" #include "unittest/Unittest.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" diff --git a/core/unittest/prometheus/CMakeLists.txt b/core/unittest/prometheus/CMakeLists.txt index eb5f95d8f1..5657470e7e 100644 --- a/core/unittest/prometheus/CMakeLists.txt +++ b/core/unittest/prometheus/CMakeLists.txt @@ -40,6 +40,9 @@ target_link_libraries(textparser_benchmark ${UT_BASE_TARGET}) add_executable(scrape_config_unittest ScrapeConfigUnittest.cpp) target_link_libraries(scrape_config_unittest ${UT_BASE_TARGET}) +add_executable(prom_utils_unittest UtilsUnittest.cpp) +target_link_libraries(prom_utils_unittest ${UT_BASE_TARGET}) + include(GoogleTest) gtest_discover_tests(labels_unittest) @@ -50,3 +53,4 @@ gtest_discover_tests(prometheus_input_runner_unittest) gtest_discover_tests(textparser_unittest) gtest_discover_tests(textparser_benchmark) gtest_discover_tests(scrape_config_unittest) +gtest_discover_tests(prom_utils_unittest) diff --git a/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp b/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp index bd9b7c2f91..ef261324b5 100644 --- a/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp +++ 
b/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp @@ -54,65 +54,13 @@ void InputRunnerMockHttpClient::Send(const std::string&, const std::string&, const bool) { httpMessage.statusCode = 200; - if (url.find("/jobs") == 0) { - httpMessage.content = R"( - [{ - "targets": [ - "192.168.22.7:8080" - ], - "labels": { - "__meta_kubernetes_pod_controller_kind": "ReplicaSet", - "__meta_kubernetes_pod_container_image": "registry-vpc.cn-hangzhou.aliyuncs.com/acs/kube-state-metrics:v2.3.0-a71f78c-aliyun", - "__meta_kubernetes_namespace": "arms-prom", - "__meta_kubernetes_pod_labelpresent_pod_template_hash": "true", - "__meta_kubernetes_pod_uid": "00d1897f-d442-47c4-8423-e9bf32dea173", - "__meta_kubernetes_pod_container_init": "false", - "__meta_kubernetes_pod_container_port_protocol": "TCP", - "__meta_kubernetes_pod_host_ip": "192.168.21.234", - "__meta_kubernetes_pod_controller_name": "kube-state-metrics-64cf88c8f4", - "__meta_kubernetes_pod_annotation_k8s_aliyun_com_pod_ips": "192.168.22.7", - "__meta_kubernetes_pod_ready": "true", - "__meta_kubernetes_pod_node_name": "cn-hangzhou.192.168.21.234", - "__meta_kubernetes_pod_annotationpresent_k8s_aliyun_com_pod_ips": "true", - "__address__": "192.168.22.7:8080", - "__meta_kubernetes_pod_labelpresent_k8s_app": "true", - "__meta_kubernetes_pod_label_k8s_app": "kube-state-metrics", - "__meta_kubernetes_pod_container_id": "containerd://57c4dfd8d9ea021defb248dfbc5cc3bd3758072c4529be351b8cc6838bdff02f", - "__meta_kubernetes_pod_container_port_number": "8080", - "__meta_kubernetes_pod_ip": "192.168.22.7", - "__meta_kubernetes_pod_phase": "Running", - "__meta_kubernetes_pod_container_name": "kube-state-metrics", - "__meta_kubernetes_pod_container_port_name": "http-metrics", - "__meta_kubernetes_pod_label_pod_template_hash": "64cf88c8f4", - "__meta_kubernetes_pod_name": "kube-state-metrics-64cf88c8f4-jtn6v" - } - }, - { - "targets": [ - "192.168.22.31:6443" - ], - "labels": { - "__address__": "192.168.22.31:6443", - 
"__meta_kubernetes_endpoint_port_protocol": "TCP", - "__meta_kubernetes_service_label_provider": "kubernetes", - "__meta_kubernetes_endpoints_name": "kubernetes", - "__meta_kubernetes_service_name": "kubernetes", - "__meta_kubernetes_endpoints_labelpresent_endpointslice_kubernetes_io_skip_mirror": "true", - "__meta_kubernetes_service_labelpresent_provider": "true", - "__meta_kubernetes_endpoint_port_name": "https", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_component": "apiserver", - "__meta_kubernetes_service_labelpresent_component": "true", - "__meta_kubernetes_endpoint_ready": "true" - } - }] - )"; - } } class PrometheusInputRunnerUnittest : public testing::Test { public: void OnSuccessfulStartAndStop(); + void TestHasRegisteredPlugins(); + void TestMulitStartAndStop(); protected: void SetUp() override { @@ -151,7 +99,7 @@ void PrometheusInputRunnerUnittest::OnSuccessfulStartAndStop() { // update scrapeJob PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr)); - PrometheusInputRunner::GetInstance()->Start(); + PrometheusInputRunner::GetInstance()->Init(); APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("test_job") != PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); @@ -165,7 +113,68 @@ void PrometheusInputRunnerUnittest::OnSuccessfulStartAndStop() { PrometheusInputRunner::GetInstance()->Stop(); } +void PrometheusInputRunnerUnittest::TestHasRegisteredPlugins() { + PrometheusInputRunner::GetInstance()->mClient = make_unique(); + PrometheusInputRunner::GetInstance()->Init(); + + // not in use + APSARA_TEST_FALSE(PrometheusInputRunner::GetInstance()->HasRegisteredPlugins()); + + // in use + PrometheusInputRunner::GetInstance()->Init(); + string errorMsg; + string configStr; + Json::Value config; + configStr = R"JSON( + { + "job_name": "test_job", + "scheme": "http", + "metrics_path": "/metrics", + "scrape_interval": "30s", + 
"scrape_timeout": "30s" + } + )JSON"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + + std::unique_ptr scrapeJobPtr = make_unique(); + APSARA_TEST_TRUE(scrapeJobPtr->Init(config)); + PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr)); + APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->HasRegisteredPlugins()); + PrometheusInputRunner::GetInstance()->Stop(); +} + +void PrometheusInputRunnerUnittest::TestMulitStartAndStop() { + PrometheusInputRunner::GetInstance()->mClient = make_unique(); + PrometheusInputRunner::GetInstance()->Init(); + { + std::lock_guard lock(PrometheusInputRunner::GetInstance()->mStartMutex); + APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mIsStarted); + } + PrometheusInputRunner::GetInstance()->Init(); + { + std::lock_guard lock(PrometheusInputRunner::GetInstance()->mStartMutex); + APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mIsStarted); + } + PrometheusInputRunner::GetInstance()->Stop(); + { + std::lock_guard lock(PrometheusInputRunner::GetInstance()->mStartMutex); + APSARA_TEST_FALSE(PrometheusInputRunner::GetInstance()->mIsStarted); + } + PrometheusInputRunner::GetInstance()->Init(); + { + std::lock_guard lock(PrometheusInputRunner::GetInstance()->mStartMutex); + APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mIsStarted); + } + PrometheusInputRunner::GetInstance()->Stop(); + { + std::lock_guard lock(PrometheusInputRunner::GetInstance()->mStartMutex); + APSARA_TEST_FALSE(PrometheusInputRunner::GetInstance()->mIsStarted); + } +} + UNIT_TEST_CASE(PrometheusInputRunnerUnittest, OnSuccessfulStartAndStop) +UNIT_TEST_CASE(PrometheusInputRunnerUnittest, TestHasRegisteredPlugins) +UNIT_TEST_CASE(PrometheusInputRunnerUnittest, TestMulitStartAndStop) } // namespace logtail diff --git a/core/unittest/prometheus/ScrapeConfigUnittest.cpp b/core/unittest/prometheus/ScrapeConfigUnittest.cpp index 020c3e5196..8560892071 100644 --- 
a/core/unittest/prometheus/ScrapeConfigUnittest.cpp +++ b/core/unittest/prometheus/ScrapeConfigUnittest.cpp @@ -1,6 +1,8 @@ +#include #include +#include "FileSystemUtil.h" #include "JsonUtil.h" #include "json/value.h" #include "prometheus/schedulers/ScrapeConfig.h" @@ -12,8 +14,27 @@ namespace logtail { class ScrapeConfigUnittest : public testing::Test { public: void TestInit(); + void TestAuth(); + void TestBasicAuth(); + void TestAuthorization(); + +private: + void SetUp() override; + void TearDown() override; + + string mFilePath = "prom_password.file"; + string mKey = "test_password.file"; }; +void ScrapeConfigUnittest::SetUp() { + // create test_password.file + OverwriteFile(mFilePath, mKey); +} + +void ScrapeConfigUnittest::TearDown() { + remove(mFilePath.c_str()); +} + void ScrapeConfigUnittest::TestInit() { Json::Value config; ScrapeConfig scrapeConfig; @@ -28,42 +49,52 @@ void ScrapeConfigUnittest::TestInit() { APSARA_TEST_FALSE(scrapeConfig.Init(config)); // all useful config - configStr = R"JSON({ - "job_name": "test_job", - "scheme": "http", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "max_scrape_size": "1024MiB", - "sample_limit": 10000, - "series_limit": 10000, - "relabel_configs": [ - { - "action": "keep", - "regex": "kube-state-metrics", - "replacement": "$1", - "separator": ";", - "source_labels": [ - "__meta_kubernetes_pod_label_k8s_app" + { + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "basic_auth": { + "username": "test_user", + "password": "test_password" + }, + "max_scrape_size": "1024MiB", + "sample_limit": 10000, + "series_limit": 10000, + "relabel_configs": [ + { + "action": "keep", + "regex": "kube-state-metrics", + "replacement": "$1", + "separator": ";", + "source_labels": [ + "__meta_kubernetes_pod_label_k8s_app" + ] + } + ], + "params" : { + "__param_query": [ + "test_query" + ], + 
"__param_query_1": [ + "test_query_1" ] } - ], - "params" : { - "__param_query": [ - "test_query" - ], - "__param_query_1": [ - "test_query_1" - ] - } - })JSON"; + })JSON"; + } APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); APSARA_TEST_TRUE(scrapeConfig.Init(config)); APSARA_TEST_EQUAL(scrapeConfig.mJobName, "test_job"); - APSARA_TEST_EQUAL(scrapeConfig.mScheme, "http"); - APSARA_TEST_EQUAL(scrapeConfig.mMetricsPath, "/metrics"); APSARA_TEST_EQUAL(scrapeConfig.mScrapeIntervalSeconds, 30); APSARA_TEST_EQUAL(scrapeConfig.mScrapeTimeoutSeconds, 30); + APSARA_TEST_EQUAL(scrapeConfig.mMetricsPath, "/metrics"); + APSARA_TEST_EQUAL(scrapeConfig.mScheme, "http"); + + // basic auth + APSARA_TEST_EQUAL(scrapeConfig.mAuthHeaders["Authorization"], "Basic dGVzdF91c2VyOnRlc3RfcGFzc3dvcmQ="); + APSARA_TEST_EQUAL(scrapeConfig.mMaxScrapeSizeBytes, 1024 * 1024 * 1024); APSARA_TEST_EQUAL(scrapeConfig.mSampleLimit, 10000); APSARA_TEST_EQUAL(scrapeConfig.mSeriesLimit, 10000); @@ -72,7 +103,138 @@ void ScrapeConfigUnittest::TestInit() { APSARA_TEST_EQUAL(scrapeConfig.mParams["__param_query_1"][0], "test_query_1"); } +void ScrapeConfigUnittest::TestAuth() { + Json::Value config; + ScrapeConfig scrapeConfig; + string errorMsg; + string configStr; + + // error config + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "basic_auth": { + "username": "test_user", + "password": "test_password" + }, + "authorization": { + "type": "Bearer", + "credentials": "test_token" + } + })JSON"; + + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + APSARA_TEST_FALSE(scrapeConfig.Init(config)); +} + +void ScrapeConfigUnittest::TestBasicAuth() { + Json::Value config; + ScrapeConfig scrapeConfig; + string errorMsg; + string configStr; + + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + 
"scheme": "http", + "basic_auth": { + "username": "test_user", + "password": "test_password" + } + })JSON"; + + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + APSARA_TEST_TRUE(scrapeConfig.Init(config)); + APSARA_TEST_EQUAL(scrapeConfig.mAuthHeaders["Authorization"], "Basic dGVzdF91c2VyOnRlc3RfcGFzc3dvcmQ="); + + scrapeConfig.mAuthHeaders.clear(); + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "basic_auth": { + "username": "test_user", + "password_file": "prom_password.file" + } + })JSON"; + + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + APSARA_TEST_TRUE(scrapeConfig.Init(config)); + APSARA_TEST_EQUAL(scrapeConfig.mAuthHeaders["Authorization"], "Basic dGVzdF91c2VyOnRlc3RfcGFzc3dvcmQuZmlsZQ=="); + + // error + scrapeConfig.mAuthHeaders.clear(); + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "basic_auth": { + "username": "test_user", + "password": "test_password", + "password_file": "prom_password.file" + } + })JSON"; + + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + APSARA_TEST_FALSE(scrapeConfig.Init(config)); +} + +void ScrapeConfigUnittest::TestAuthorization() { + Json::Value config; + ScrapeConfig scrapeConfig; + string errorMsg; + string configStr; + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "authorization": { + "type": "Bearer", + "credentials": "test_token" + } + })JSON"; + + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + scrapeConfig.mAuthHeaders.clear(); + APSARA_TEST_TRUE(scrapeConfig.Init(config)); + // bearer auth + APSARA_TEST_EQUAL(scrapeConfig.mAuthHeaders["Authorization"], "Bearer test_token"); + + scrapeConfig.mAuthHeaders.clear(); + + // default 
Bearer auth + configStr = R"JSON({ + "job_name": "test_job", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "authorization": { + "credentials_file": "prom_password.file" + } + })JSON"; + + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + scrapeConfig.mAuthHeaders.clear(); + APSARA_TEST_TRUE(scrapeConfig.Init(config)); + APSARA_TEST_EQUAL(scrapeConfig.mAuthHeaders["Authorization"], "Bearer " + mKey); +} + UNIT_TEST_CASE(ScrapeConfigUnittest, TestInit); +UNIT_TEST_CASE(ScrapeConfigUnittest, TestAuth); +UNIT_TEST_CASE(ScrapeConfigUnittest, TestBasicAuth); +UNIT_TEST_CASE(ScrapeConfigUnittest, TestAuthorization); } // namespace logtail diff --git a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp index 37e32bba70..dd2d7b7260 100644 --- a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp +++ b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp @@ -19,7 +19,9 @@ #include #include "common/StringTools.h" +#include "common/timer/Timer.h" #include "prometheus/Constants.h" +#include "prometheus/async/PromFuture.h" #include "prometheus/labels/Labels.h" #include "prometheus/schedulers/ScrapeConfig.h" #include "prometheus/schedulers/ScrapeScheduler.h" @@ -29,12 +31,24 @@ using namespace std; namespace logtail { +class MockTimer : public Timer { +public: + void Init() {} + void PushEvent(std::unique_ptr&& e) { mQueue.push_back(std::move(e)); } + void Stop() {} + std::vector> mQueue; +}; + class ScrapeSchedulerUnittest : public testing::Test { public: void TestInitscrapeScheduler(); void TestProcess(); void TestSplitByLines(); void TestReceiveMessage(); + void TestGetRandSleep(); + + void TestScheduler(); + protected: void SetUp() override { @@ -44,7 +58,7 @@ class ScrapeSchedulerUnittest : public testing::Test { mScrapeConfig->mScrapeIntervalSeconds = 10; mScrapeConfig->mScrapeTimeoutSeconds = 10; mScrapeConfig->mMetricsPath = "/metrics"; - 
mScrapeConfig->mHeaders = {{"Authorization", "Bearer xxxxx"}}; + mScrapeConfig->mAuthHeaders = {{"Authorization", "Bearer xxxxx"}}; mHttpResponse.mBody = "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" @@ -91,13 +105,14 @@ void ScrapeSchedulerUnittest::TestProcess() { ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); // if status code is not 200, no data will be processed + // but will continue running, sending self-monitoring metrics mHttpResponse.mStatusCode = 503; - event.OnMetricResult(mHttpResponse); - APSARA_TEST_EQUAL(0UL, event.mItem.size()); + event.OnMetricResult(mHttpResponse, 0); + APSARA_TEST_EQUAL(1UL, event.mItem.size()); event.mItem.clear(); mHttpResponse.mStatusCode = 200; - event.OnMetricResult(mHttpResponse); + event.OnMetricResult(mHttpResponse, 0); APSARA_TEST_EQUAL(1UL, event.mItem.size()); APSARA_TEST_EQUAL(11UL, event.mItem[0]->mEventGroup.GetEvents().size()); } @@ -108,7 +123,7 @@ void ScrapeSchedulerUnittest::TestSplitByLines() { labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); - auto res = event.BuildPipelineEventGroup(mHttpResponse.mBody, 0); + auto res = event.BuildPipelineEventGroup(mHttpResponse.mBody); APSARA_TEST_EQUAL(11UL, res.GetEvents().size()); APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05", res.GetEvents()[0].Cast().GetContent(prometheus::PROMETHEUS).to_string()); @@ -149,9 +164,37 @@ void ScrapeSchedulerUnittest::TestReceiveMessage() { APSARA_TEST_EQUAL(false, event->IsCancelled()); } +void ScrapeSchedulerUnittest::TestGetRandSleep() { + Labels labels; + labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + ScrapeScheduler 
event(mScrapeConfig, "localhost", 8080, labels, 0, 0); + + Labels labels2; + labels2.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:9090"}); + ScrapeScheduler event2(mScrapeConfig, "localhost", 9090, labels, 0, 0); + APSARA_TEST_NOT_EQUAL(event.GetRandSleep(), event2.GetRandSleep()); +} + +void ScrapeSchedulerUnittest::TestScheduler() { + Labels labels; + labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); + auto timer = make_shared(); + event.SetTimer(timer); + event.ScheduleNext(); + + APSARA_TEST_TRUE(timer->mQueue.size() == 1); + + event.Cancel(); + + APSARA_TEST_TRUE(event.mValidState == false); + APSARA_TEST_TRUE(event.mFuture->mState == PromFutureState::Done); +} + UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestInitscrapeScheduler) UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestProcess) UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestSplitByLines) +UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestGetRandSleep) } // namespace logtail diff --git a/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp b/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp index e4db364cc3..fdd8d47d8e 100644 --- a/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp +++ b/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp @@ -153,20 +153,18 @@ void TargetSubscriberSchedulerUnittest::OnInitScrapeJobEvent() { APSARA_TEST_EQUAL(targetSubscriber->mJobName, "_kube-state-metrics"); } - - void TargetSubscriberSchedulerUnittest::TestProcess() { std::shared_ptr targetSubscriber = std::make_shared(); APSARA_TEST_TRUE(targetSubscriber->Init(mConfig["ScrapeConfig"])); // if status code is not 200 mHttpResponse.mStatusCode = 404; - targetSubscriber->OnSubscription(mHttpResponse); + targetSubscriber->OnSubscription(mHttpResponse, 0); APSARA_TEST_EQUAL(0UL, targetSubscriber->mScrapeSchedulerMap.size()); // if status code is 200 mHttpResponse.mStatusCode = 200; - 
targetSubscriber->OnSubscription(mHttpResponse); + targetSubscriber->OnSubscription(mHttpResponse, 0); APSARA_TEST_EQUAL(2UL, targetSubscriber->mScrapeSchedulerMap.size()); } diff --git a/core/unittest/prometheus/TextParserBenchmark.cpp b/core/unittest/prometheus/TextParserBenchmark.cpp index 5e4241897c..7af03beb72 100644 --- a/core/unittest/prometheus/TextParserBenchmark.cpp +++ b/core/unittest/prometheus/TextParserBenchmark.cpp @@ -26,14 +26,23 @@ namespace logtail { class TextParserBenchmark : public testing::Test { public: void TestParse100M() const; + void TestParse1000M() const; protected: void SetUp() override { + m100MData.reserve(100 * 1024 * 1024); // 100MB - int totalSize = 100 * 1024 * 1024; - while (totalSize > 0) { + int repeatCnt = 100 * 1024 * 1024 / mRawData.size(); + while (repeatCnt > 0) { m100MData += mRawData; - totalSize -= mRawData.size(); + repeatCnt -= 1; + } + + m1000MData.reserve(1000 * 1024 * 1024); + repeatCnt = 1000 * 1024 * 1024 / mRawData.size(); + while (repeatCnt > 0) { + m1000MData += mRawData; + repeatCnt -= 1; } } @@ -47,23 +56,39 @@ test_metric5{k1="v1",k2="v2",} 9.9410452992e+10 1715829785083 test_metric6{k1="v1",k2="v2", } 9.9410452992e+10 1715829785083 test_metric7{k1="v1", k2="v2", } 9.9410452992e+10 1715829785083 test_metric8{k1="v1", k2="v2", } 9.9410452992e+10 1715829785083 - )"""; +)"""; std::string m100MData; + std::string m1000MData; }; void TextParserBenchmark::TestParse100M() const { auto start = std::chrono::high_resolution_clock::now(); TextParser parser; - parser.Parse(m100MData); + auto res = parser.Parse(m100MData, 0, 0); + + auto end = std::chrono::high_resolution_clock::now(); + std::chrono::duration elapsed = end - start; + cout << "elapsed: " << elapsed.count() << " seconds" << endl; + // elapsed: 1.53s in release mode + // elapsed: 551MB in release mode +} + +void TextParserBenchmark::TestParse1000M() const { + auto start = std::chrono::high_resolution_clock::now(); + + TextParser parser; + auto res = 
parser.Parse(m1000MData, 0, 0); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration elapsed = end - start; cout << "elapsed: " << elapsed.count() << " seconds" << endl; - // elapsed: 10.1014 seconds in release mode + // elapsed: 15.4s in release mode + // elapsed: 4960MB in release mode } UNIT_TEST_CASE(TextParserBenchmark, TestParse100M) +UNIT_TEST_CASE(TextParserBenchmark, TestParse1000M) } // namespace logtail diff --git a/core/unittest/prometheus/TextParserUnittest.cpp b/core/unittest/prometheus/TextParserUnittest.cpp index e4fa7f6bd2..fc575dd4d1 100644 --- a/core/unittest/prometheus/TextParserUnittest.cpp +++ b/core/unittest/prometheus/TextParserUnittest.cpp @@ -25,39 +25,19 @@ using namespace std; namespace logtail { -bool isDoubleEqual(double a, double b) { +bool IsDoubleEqual(double a, double b) { return fabs(a - b) < 0.000001; } class TextParserUnittest : public testing::Test { public: - void TestMetricEvent() const; void TestParseMultipleLines() const; - void TestSampleRegex() const; void TestParseMetricWithTagsAndTimestamp() const; void TestParseMetricWithManyTags() const; -}; -void TextParserUnittest::TestMetricEvent() const { - const auto& srcBuf = make_shared(); - auto eGroup = PipelineEventGroup(srcBuf); - auto event = eGroup.AddMetricEvent(); - event->SetName("test_metric"); - event->SetValue(MetricValue(UntypedSingleValue{1.0})); - event->SetTimestamp(1234567890); - event->SetTag(StringView("test_key"), StringView("test_value")); - - const auto& events = eGroup.GetEvents(); - APSARA_TEST_EQUAL(1UL, events.size()); - const auto& firstEvent = &events.front(); - const auto& firstMetric = firstEvent->Get(); - APSARA_TEST_STREQ("test_metric", firstMetric->GetName().data()); - const auto& metricValue = firstMetric->GetValue(); - APSARA_TEST_EQUAL(1.0, metricValue->mValue); - APSARA_TEST_EQUAL(1234567890, firstMetric->GetTimestamp()); - APSARA_TEST_STREQ("test_value", firstMetric->GetTag(logtail::StringView("test_key")).data()); -} 
-UNIT_TEST_CASE(TextParserUnittest, TestMetricEvent) + void TestParseFaliure(); + void TestParseSuccess(); +}; void TextParserUnittest::TestParseMultipleLines() const { auto parser = TextParser(); @@ -74,7 +54,9 @@ test_metric7{k1="v1",k2="v2", } 9.9410452992e+10 1715829785083 test_metric8{k1="v1", k2="v2", } 9.9410452992e+10 1715829785083 # end - )"""); + )""", + 0, + 0); const auto& events = &eGroup.GetEvents(); APSARA_TEST_EQUAL(7UL, events->size()); } @@ -82,30 +64,33 @@ UNIT_TEST_CASE(TextParserUnittest, TestParseMultipleLines) void TextParserUnittest::TestParseMetricWithTagsAndTimestamp() const { auto parser = TextParser(); - const auto eGroup = parser.Parse(R"""( + string rawData = R"""( test_metric{k1="v1", k2="v2"} 9.9410452992e+10 1715829785083 test_metric2{k1="v1", k2="v2"} 2.0 1715829785083 test_metric3{k1="v1",k2="v2"} 4.2 92233720368547758080000 - )"""); + )"""; + const auto eGroup = parser.Parse(rawData, 0, 0); + // test_metric const auto& events = &eGroup.GetEvents(); const auto& event = events->front(); const auto& metric = event.Get(); - APSARA_TEST_STREQ("test_metric", metric->GetName().data()); + APSARA_TEST_EQUAL("test_metric", metric->GetName().to_string()); APSARA_TEST_EQUAL(1715829785, metric->GetTimestamp()); - APSARA_TEST_TRUE(isDoubleEqual(9.9410452992e+10, metric->GetValue()->mValue)); - APSARA_TEST_STREQ("v1", metric->GetTag("k1").data()); - APSARA_TEST_STREQ("v2", metric->GetTag("k2").data()); + APSARA_TEST_EQUAL(83000000, metric->GetTimestampNanosecond()); + APSARA_TEST_TRUE(IsDoubleEqual(9.9410452992e+10, metric->GetValue()->mValue)); + APSARA_TEST_EQUAL("v1", metric->GetTag("k1").to_string()); + APSARA_TEST_EQUAL("v2", metric->GetTag("k2").to_string()); // test_metric2 - auto& event2 = events->at(1); + const auto& event2 = events->at(1); const auto& metric2 = event2.Get(); - APSARA_TEST_STREQ("test_metric2", metric2->GetName().data()); + APSARA_TEST_EQUAL("test_metric2", metric2->GetName().to_string()); APSARA_TEST_EQUAL(1715829785, 
metric2->GetTimestamp()); - APSARA_TEST_TRUE(isDoubleEqual(2.0, metric2->GetValue()->mValue)); - APSARA_TEST_STREQ("v1", metric2->GetTag("k1").data()); - APSARA_TEST_STREQ("v2", metric2->GetTag("k2").data()); + APSARA_TEST_TRUE(IsDoubleEqual(2.0, metric2->GetValue()->mValue)); + APSARA_TEST_EQUAL("v1", metric2->GetTag("k1").to_string()); + APSARA_TEST_EQUAL("v2", metric2->GetTag("k2").to_string()); // test_metric3 is not generated because of timestamp overflow APSARA_TEST_EQUAL(2UL, events->size()); @@ -114,33 +99,233 @@ UNIT_TEST_CASE(TextParserUnittest, TestParseMetricWithTagsAndTimestamp) void TextParserUnittest::TestParseMetricWithManyTags() const { auto parser = TextParser(); - const auto eGroup = parser.Parse( - R"""(container_blkio_device_usage_total{container="",device="/dev/nvme0n1",id="/",image="",major="259",minor="0",name="",namespace="",operation="Async",pod=""} 9.9410452992e+10 1715829785083)""", - 1715829785083, - "test_job", - "test_instance"); + string rawData + = R"""(container_blkio_device_usage_total{container="",device="/dev/nvme0n1",id="/",image="",major="259",minor="0",name="",namespace="",operation="Async",pod=""} 9.9410452992e+10 1715829785083)"""; + const auto eGroup = parser.Parse(rawData, 1715829785, 83000000); const auto& events = &eGroup.GetEvents(); APSARA_TEST_EQUAL(1UL, events->size()); const auto& event = events->front(); const auto& metric = event.Get(); - APSARA_TEST_STREQ("container_blkio_device_usage_total", metric->GetName().data()); + APSARA_TEST_EQUAL("container_blkio_device_usage_total", metric->GetName().to_string()); APSARA_TEST_EQUAL(1715829785, metric->GetTimestamp()); - APSARA_TEST_TRUE(isDoubleEqual(9.9410452992e+10, metric->GetValue()->mValue)); - - // TODO: assert number of tags - APSARA_TEST_STREQ("", metric->GetTag("container").data()); - APSARA_TEST_STREQ("/dev/nvme0n1", metric->GetTag("device").data()); - APSARA_TEST_STREQ("/", metric->GetTag("id").data()); - APSARA_TEST_STREQ("", 
metric->GetTag("image").data()); - APSARA_TEST_STREQ("259", metric->GetTag("major").data()); - APSARA_TEST_STREQ("0", metric->GetTag("minor").data()); - APSARA_TEST_STREQ("", metric->GetTag("name").data()); - APSARA_TEST_STREQ("", metric->GetTag("namespace").data()); - APSARA_TEST_STREQ("Async", metric->GetTag("operation").data()); - APSARA_TEST_STREQ("", metric->GetTag("pod").data()); + APSARA_TEST_TRUE(IsDoubleEqual(9.9410452992e+10, metric->GetValue()->mValue)); + + APSARA_TEST_EQUAL("", metric->GetTag("container").to_string()); + APSARA_TEST_EQUAL("/dev/nvme0n1", metric->GetTag("device").to_string()); + APSARA_TEST_EQUAL("/", metric->GetTag("id").to_string()); + APSARA_TEST_EQUAL("", metric->GetTag("image").to_string()); + APSARA_TEST_EQUAL("259", metric->GetTag("major").to_string()); + APSARA_TEST_EQUAL("0", metric->GetTag("minor").to_string()); + APSARA_TEST_EQUAL("", metric->GetTag("name").to_string()); + APSARA_TEST_EQUAL("", metric->GetTag("namespace").to_string()); + APSARA_TEST_EQUAL("Async", metric->GetTag("operation").to_string()); + APSARA_TEST_EQUAL("", metric->GetTag("pod").to_string()); } UNIT_TEST_CASE(TextParserUnittest, TestParseMetricWithManyTags) +void TextParserUnittest::TestParseFaliure() { + auto f = [](const std::string& content) { + TextParser parser; + PipelineEventGroup eGroup = parser.Parse(content, 0, 0); + APSARA_TEST_EQUAL(0UL, eGroup.GetEvents().size()); + }; + + // Empty lines and comments + f(""); + f(" "); + f("\t"); + f("\t \r"); + f("\t\t \n\n # foobar"); + f("#foobar"); + f("#foobar\n"); + + // invalid tags + f("a{"); + f("a { "); + f("a {foo"); + f("a {foo} 3"); + f("a {foo ="); + f("a {foo =\"bar"); + f("a {foo =\"b\\ar"); + f("a {foo = \"bar\""); + f("a {foo =\"bar\","); + f("a {foo =\"bar\" , "); + f("a {foo =\"bar\" , baz } 2"); + + // Invalid tags - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4284 + f(R"(a{"__name__":"upsd_time_left_ns","host":"myhost", "status_OB":"true"} 12)"); + 
f(R"(a{host:"myhost"} 12)"); + f(R"(a{host:"myhost",foo="bar"} 12)"); + + // Empty metric name + f(R"({foo="bar"})"); + + // Invalid quotes for label value + f(R"({foo='bar'} 23)"); + f(R"({foo=`bar`} 23"); + + // Missing value + f("aaa"); + f(" aaa"); + f(" aaa "); + f(" aaa \n"); + f(R"( aa{foo="bar"} )" + + std::string("\n")); + + // Invalid value + f("foo bar"); + f("foo bar 124"); + + // Invalid timestamp + f("foo 123 bar"); +} +UNIT_TEST_CASE(TextParserUnittest, TestParseFaliure) + +void TextParserUnittest::TestParseSuccess() { + TextParser parser; + string rawData; + // single value + rawData = "foobar 123"; + auto res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "foobar"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 123.0)); + + rawData = "foobar 123.456 789\n"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "foobar"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 123.456)); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 789); + + rawData = R"( + # TYPE cassandra_token_ownership_ratio gauge +cassandra_token_ownership_ratio 78.9)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), + "cassandra_token_ownership_ratio"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 78.9)); + + // `#` char in label value + rawData = R"(foo{bar="#1 az"} 24)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "foo"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("bar").to_string(), "#1 az"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 24.0)); + + // Incorrectly escaped backlash. This is real-world case, which must be supported. 
+ rawData = R"(mssql_sql_server_active_transactions_sec{loginname="domain\somelogin",env="develop"} 56)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), + "mssql_sql_server_active_transactions_sec"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("loginname").to_string(), "domain\\somelogin"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("env").to_string(), "develop"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 56.0)); + + rawData = R"(foo_bucket{le="10",a="#b"} 17)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "foo_bucket"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("le").to_string(), "10"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("a").to_string(), "#b"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 17.0)); + + // "Infinity" word - this has been added in OpenMetrics. 
+ // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md + // Checks for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/924 + rawData = R"(foo Infinity + bar +Infinity + baz -infinity + aaa +inf + bbb -INF + ccc INF)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().size(), 6UL); + APSARA_TEST_EQUAL(res.GetEvents()[0].Cast().GetName().to_string(), "foo"); + APSARA_TEST_EQUAL(res.GetEvents()[0].Cast().GetValue()->mValue, + std::numeric_limits::infinity()); + APSARA_TEST_EQUAL(res.GetEvents()[1].Cast().GetName().to_string(), "bar"); + APSARA_TEST_EQUAL(res.GetEvents()[1].Cast().GetValue()->mValue, + std::numeric_limits::infinity()); + APSARA_TEST_EQUAL(res.GetEvents()[2].Cast().GetName().to_string(), "baz"); + APSARA_TEST_EQUAL(res.GetEvents()[2].Cast().GetValue()->mValue, + -std::numeric_limits::infinity()); + APSARA_TEST_EQUAL(res.GetEvents()[3].Cast().GetName().to_string(), "aaa"); + APSARA_TEST_EQUAL(res.GetEvents()[3].Cast().GetValue()->mValue, + std::numeric_limits::infinity()); + APSARA_TEST_EQUAL(res.GetEvents()[4].Cast().GetName().to_string(), "bbb"); + APSARA_TEST_EQUAL(res.GetEvents()[4].Cast().GetValue()->mValue, + -std::numeric_limits::infinity()); + APSARA_TEST_EQUAL(res.GetEvents()[5].Cast().GetName().to_string(), "ccc"); + APSARA_TEST_EQUAL(res.GetEvents()[5].Cast().GetValue()->mValue, + std::numeric_limits::infinity()); + + // tags + rawData = R"(foo{bar="b\"a\\z"} -1.2)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "foo"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("bar").to_string(), "b\"a\\z"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, -1.2)); + + // Empty tags + rawData = R"(foo {bar="baz",aa="",x="y"} 1 2)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "foo"); + 
APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("bar").to_string(), "baz"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("aa").to_string(), ""); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("x").to_string(), "y"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 1.0)); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 2); + + // Multi lines with invalid line + rawData = "\t foo\t { } 0.3\t 2\naaa\n barbaz 0.34 43\n"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().size(), 2UL); + APSARA_TEST_EQUAL(res.GetEvents()[0].Cast().GetName().to_string(), "foo"); + APSARA_TEST_TRUE(IsDoubleEqual(res.GetEvents()[0].Cast().GetValue()->mValue, 0.3)); + APSARA_TEST_EQUAL(res.GetEvents()[0].Cast().GetTimestamp(), 2); + APSARA_TEST_EQUAL(res.GetEvents()[1].Cast().GetName().to_string(), "barbaz"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents()[1].Cast().GetValue()->mValue, 0.34)); + APSARA_TEST_EQUAL(res.GetEvents()[1].Cast().GetTimestamp(), 43); + + // Spaces around tags + rawData = R"(vm_accounting { name="vminsertRows", accountID = "1" , projectID= "1" } 277779100)"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "vm_accounting"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("name").to_string(), "vminsertRows"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("accountID").to_string(), "1"); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTag("projectID").to_string(), "1"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 277779100.0)); + + // Exemplars + rawData = "abc 123 456 # foobar"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "abc"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 123.0)); + 
APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 456); + + // float timestamp + rawData = "abc 123 456.789"; + res = parser.Parse(rawData, 0, 0); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetName().to_string(), "abc"); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetValue()->mValue, 123.0)); + APSARA_TEST_TRUE(IsDoubleEqual(res.GetEvents().back().Cast().GetTimestamp(), 456)); + APSARA_TEST_TRUE( + IsDoubleEqual(res.GetEvents().back().Cast().GetTimestampNanosecond().value(), 789000000)); +} + +UNIT_TEST_CASE(TextParserUnittest, TestParseSuccess) + + } // namespace logtail UNIT_TEST_MAIN diff --git a/core/unittest/prometheus/UtilsUnittest.cpp b/core/unittest/prometheus/UtilsUnittest.cpp new file mode 100644 index 0000000000..a6ebd4f01e --- /dev/null +++ b/core/unittest/prometheus/UtilsUnittest.cpp @@ -0,0 +1,23 @@ + +#include "models/StringView.h" +#include "prometheus/Utils.h" +#include "unittest/Unittest.h" + +using namespace std; + +namespace logtail { + +bool IsDoubleEqual(double a, double b) { + return fabs(a - b) < 0.000001; +} + +class PromUtilsUnittest : public testing::Test { +public: +}; + + + + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/queue/BoundedProcessQueueUnittest.cpp b/core/unittest/queue/BoundedProcessQueueUnittest.cpp index 9421715945..c36299ad86 100644 --- a/core/unittest/queue/BoundedProcessQueueUnittest.cpp +++ b/core/unittest/queue/BoundedProcessQueueUnittest.cpp @@ -16,8 +16,8 @@ #include "common/FeedbackInterface.h" #include "models/PipelineEventGroup.h" -#include "queue/BoundedProcessQueue.h" -#include "queue/SenderQueue.h" +#include "pipeline/queue/BoundedProcessQueue.h" +#include "pipeline/queue/SenderQueue.h" #include "unittest/Unittest.h" #include "unittest/queue/FeedbackInterfaceMock.h" diff --git a/core/unittest/queue/CircularProcessQueueUnittest.cpp b/core/unittest/queue/CircularProcessQueueUnittest.cpp index 0ad0c43bbb..1bed2ded03 100644 --- 
a/core/unittest/queue/CircularProcessQueueUnittest.cpp +++ b/core/unittest/queue/CircularProcessQueueUnittest.cpp @@ -15,8 +15,8 @@ #include #include "models/PipelineEventGroup.h" -#include "queue/CircularProcessQueue.h" -#include "queue/SenderQueue.h" +#include "pipeline/queue/CircularProcessQueue.h" +#include "pipeline/queue/SenderQueue.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp b/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp index 582536eff3..4adaffeccb 100644 --- a/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp +++ b/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp @@ -14,12 +14,12 @@ #include -#include "flusher/sls/FlusherSLS.h" -#include "input/InputFeedbackInterfaceRegistry.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" #include "models/PipelineEventGroup.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/QueueKeyManager.h" -#include "queue/SLSSenderQueueItem.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SLSSenderQueueItem.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(logtail_queue_gc_threshold_sec); diff --git a/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp b/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp index 4d8713d8ca..21f670a4ef 100644 --- a/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp +++ b/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/sls/FlusherSLS.h" -#include "queue/ExactlyOnceSenderQueue.h" -#include "queue/SLSSenderQueueItem.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "pipeline/queue/ExactlyOnceSenderQueue.h" +#include "pipeline/queue/SLSSenderQueueItem.h" #include "unittest/Unittest.h" #include "unittest/queue/FeedbackInterfaceMock.h" diff --git a/core/unittest/queue/FeedbackInterfaceMock.h b/core/unittest/queue/FeedbackInterfaceMock.h index d25b7176b8..b7a8ce7df3 100644 --- a/core/unittest/queue/FeedbackInterfaceMock.h +++ b/core/unittest/queue/FeedbackInterfaceMock.h @@ -19,7 +19,7 @@ #include #include "common/FeedbackInterface.h" -#include "queue/QueueKey.h" +#include "pipeline/queue/QueueKey.h" namespace logtail { diff --git a/core/unittest/queue/ProcessQueueManagerUnittest.cpp b/core/unittest/queue/ProcessQueueManagerUnittest.cpp index 3c2663528d..f283f16342 100644 --- a/core/unittest/queue/ProcessQueueManagerUnittest.cpp +++ b/core/unittest/queue/ProcessQueueManagerUnittest.cpp @@ -15,10 +15,10 @@ #include #include "models/PipelineEventGroup.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/ProcessQueueManager.h" -#include "queue/QueueKeyManager.h" -#include "queue/QueueParam.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/QueueParam.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/queue/QueueKeyManagerUnittest.cpp b/core/unittest/queue/QueueKeyManagerUnittest.cpp index ac69e5b90e..c670508055 100644 --- a/core/unittest/queue/QueueKeyManagerUnittest.cpp +++ b/core/unittest/queue/QueueKeyManagerUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "queue/QueueKeyManager.h" +#include "pipeline/queue/QueueKeyManager.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/queue/QueueParamUnittest.cpp b/core/unittest/queue/QueueParamUnittest.cpp index 826eba0667..8243f15f36 100644 --- a/core/unittest/queue/QueueParamUnittest.cpp +++ b/core/unittest/queue/QueueParamUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "queue/QueueParam.h" +#include "pipeline/queue/QueueParam.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/queue/SenderQueueManagerUnittest.cpp b/core/unittest/queue/SenderQueueManagerUnittest.cpp index dae8b51292..c238327fb3 100644 --- a/core/unittest/queue/SenderQueueManagerUnittest.cpp +++ b/core/unittest/queue/SenderQueueManagerUnittest.cpp @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/sls/FlusherSLS.h" -#include "queue/ExactlyOnceQueueManager.h" -#include "queue/QueueKeyManager.h" -#include "queue/QueueParam.h" -#include "queue/SLSSenderQueueItem.h" -#include "queue/SenderQueueManager.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "pipeline/queue/ExactlyOnceQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/QueueParam.h" +#include "pipeline/queue/SLSSenderQueueItem.h" +#include "pipeline/queue/SenderQueueManager.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(sender_queue_gc_threshold_sec); @@ -141,7 +141,7 @@ void SenderQueueManagerUnittest::TestDeleteQueue() { APSARA_TEST_EQUAL("", QueueKeyManager::GetInstance()->GetName(key1)); // update queue will remove the queue from gc queue - sManager->CreateQueue(key2, vector>{sConcurrencyLimiter}, sMaxRate); + sManager->ReuseQueue(key2); APSARA_TEST_EQUAL(0U, sManager->mQueueDeletionTimeMap.size()); } diff --git a/core/unittest/queue/SenderQueueUnittest.cpp b/core/unittest/queue/SenderQueueUnittest.cpp index 076e2a15fa..181f24d386 100644 --- a/core/unittest/queue/SenderQueueUnittest.cpp +++ b/core/unittest/queue/SenderQueueUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "queue/SenderQueue.h" +#include "pipeline/queue/SenderQueue.h" #include "unittest/Unittest.h" #include "unittest/queue/FeedbackInterfaceMock.h" diff --git a/core/unittest/reader/DeletedFileUnittest.cpp b/core/unittest/reader/DeletedFileUnittest.cpp index ddc7c902f5..23bf0500e7 100644 --- a/core/unittest/reader/DeletedFileUnittest.cpp +++ b/core/unittest/reader/DeletedFileUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(force_release_deleted_file_fd_timeout); diff --git a/core/unittest/reader/FileReaderOptionsUnittest.cpp b/core/unittest/reader/FileReaderOptionsUnittest.cpp index d29066611f..762370eda5 100644 --- a/core/unittest/reader/FileReaderOptionsUnittest.cpp +++ b/core/unittest/reader/FileReaderOptionsUnittest.cpp @@ -20,7 +20,7 @@ #include "common/Flags.h" #include "common/JsonUtil.h" #include "pipeline/PipelineContext.h" -#include "reader/FileReaderOptions.h" +#include "file_server/reader/FileReaderOptions.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(default_tail_limit_kb); @@ -39,7 +39,7 @@ class FileReaderOptionsUnittest : public testing::Test { void OnFailedInit() const; private: - const string pluginName = "test"; + const string pluginType = "test"; PipelineContext ctx; }; @@ -76,7 +76,7 @@ void FileReaderOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new FileReaderOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(FileReaderOptions::Encoding::UTF8, config->mFileEncoding); APSARA_TEST_TRUE(config->mTailingAllMatchedFiles); APSARA_TEST_EQUAL(2048U, config->mTailSizeKB); @@ -103,7 +103,7 @@ void FileReaderOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new FileReaderOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(FileReaderOptions::Encoding::GBK, config->mFileEncoding); APSARA_TEST_FALSE(config->mTailingAllMatchedFiles); APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_tail_limit_kb)), config->mTailSizeKB); @@ -122,7 +122,7 @@ void 
FileReaderOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new FileReaderOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(FileReaderOptions::Encoding::UTF16, config->mFileEncoding); // TailSizeKB @@ -134,7 +134,7 @@ void FileReaderOptionsUnittest::OnSuccessfulInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new FileReaderOptions()); - APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_TRUE(config->Init(configJson, ctx, pluginType)); APSARA_TEST_EQUAL(FileReaderOptions::Encoding::GBK, config->mFileEncoding); APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_tail_limit_kb)), config->mTailSizeKB); } @@ -152,7 +152,7 @@ void FileReaderOptionsUnittest::OnFailedInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new FileReaderOptions()); - APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginType)); configStr = R"( { @@ -161,7 +161,7 @@ void FileReaderOptionsUnittest::OnFailedInit() const { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); config.reset(new FileReaderOptions()); - APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginName)); + APSARA_TEST_FALSE(config->Init(configJson, ctx, pluginType)); } UNIT_TEST_CASE(FileReaderOptionsUnittest, OnSuccessfulInit) diff --git a/core/unittest/reader/ForceReadUnittest.cpp b/core/unittest/reader/ForceReadUnittest.cpp index d7c8e02e08..cd3a750f6f 100644 --- a/core/unittest/reader/ForceReadUnittest.cpp +++ b/core/unittest/reader/ForceReadUnittest.cpp @@ -24,14 +24,14 @@ #include "common/Flags.h" #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "config_manager/ConfigManager.h" -#include "event/BlockEventManager.h" 
-#include "event/Event.h" -#include "event_handler/EventHandler.h" +#include "file_server/ConfigManager.h" +#include "file_server/event/BlockEventManager.h" +#include "file_server/event/Event.h" +#include "file_server/event_handler/EventHandler.h" #include "file_server/FileServer.h" #include "logger/Logger.h" #include "pipeline/Pipeline.h" -#include "queue/ProcessQueueManager.h" +#include "pipeline/queue/ProcessQueueManager.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/reader/GetLastLineDataUnittest.cpp b/core/unittest/reader/GetLastLineDataUnittest.cpp index 17dc90c118..761c225a57 100644 --- a/core/unittest/reader/GetLastLineDataUnittest.cpp +++ b/core/unittest/reader/GetLastLineDataUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. #include "common/FileSystemUtil.h" -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" #include "common/memory/SourceBuffer.h" #include "unittest/Unittest.h" diff --git a/core/unittest/reader/JsonLogFileReaderUnittest.cpp b/core/unittest/reader/JsonLogFileReaderUnittest.cpp index 2e1e7e0d65..cd9f16f805 100644 --- a/core/unittest/reader/JsonLogFileReaderUnittest.cpp +++ b/core/unittest/reader/JsonLogFileReaderUnittest.cpp @@ -19,7 +19,7 @@ #include "common/FileSystemUtil.h" #include "common/RuntimeUtil.h" #include "file_server/FileServer.h" -#include "reader/JsonLogFileReader.h" +#include "file_server/reader/JsonLogFileReader.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(force_release_deleted_file_fd_timeout); diff --git a/core/unittest/reader/LogFileReaderUnittest.cpp b/core/unittest/reader/LogFileReaderUnittest.cpp index 43cb404081..165e4e258d 100644 --- a/core/unittest/reader/LogFileReaderUnittest.cpp +++ b/core/unittest/reader/LogFileReaderUnittest.cpp @@ -21,8 +21,8 @@ #include "common/RuntimeUtil.h" #include "common/memory/SourceBuffer.h" #include "file_server/FileServer.h" -#include "log_pb/sls_logs.pb.h" -#include "reader/LogFileReader.h" 
+#include "protobuf/sls/sls_logs.pb.h" +#include "file_server/reader/LogFileReader.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(force_release_deleted_file_fd_timeout); diff --git a/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp b/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp index 5cadb31890..5ce57053b3 100644 --- a/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp +++ b/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp @@ -14,7 +14,7 @@ #include "common/FileSystemUtil.h" #include "common/memory/SourceBuffer.h" -#include "reader/LogFileReader.h" +#include "file_server/reader/LogFileReader.h" #include "unittest/Unittest.h" namespace logtail { diff --git a/core/unittest/reader/SourceBufferUnittest.cpp b/core/unittest/reader/SourceBufferUnittest.cpp index c565110a44..ac1088defa 100644 --- a/core/unittest/reader/SourceBufferUnittest.cpp +++ b/core/unittest/reader/SourceBufferUnittest.cpp @@ -15,7 +15,7 @@ #include "unittest/Unittest.h" #include #include -#include "LogFileReader.h" +#include "file_server/reader/LogFileReader.h" DECLARE_FLAG_INT32(force_release_deleted_file_fd_timeout); diff --git a/core/unittest/route/ConditionUnittest.cpp b/core/unittest/route/ConditionUnittest.cpp index cc14d52860..ed61cb3b4c 100644 --- a/core/unittest/route/ConditionUnittest.cpp +++ b/core/unittest/route/ConditionUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. 
#include "common/JsonUtil.h" -#include "route/Condition.h" +#include "pipeline/route/Condition.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/route/RouterUnittest.cpp b/core/unittest/route/RouterUnittest.cpp index e4614cd64d..bee7bb1803 100644 --- a/core/unittest/route/RouterUnittest.cpp +++ b/core/unittest/route/RouterUnittest.cpp @@ -14,7 +14,7 @@ #include "common/JsonUtil.h" #include "pipeline/Pipeline.h" -#include "route/Router.h" +#include "pipeline/route/Router.h" #include "unittest/Unittest.h" using namespace std; diff --git a/core/unittest/sender/FlusherRunnerUnittest.cpp b/core/unittest/sender/FlusherRunnerUnittest.cpp index f81cf5982c..4ddf83f2cb 100644 --- a/core/unittest/sender/FlusherRunnerUnittest.cpp +++ b/core/unittest/sender/FlusherRunnerUnittest.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "plugin/PluginRegistry.h" -#include "queue/SenderQueueManager.h" -#include "sender/FlusherRunner.h" -#include "sink/http/HttpSink.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "pipeline/queue/SenderQueueManager.h" +#include "runner/FlusherRunner.h" +#include "runner/sink/http/HttpSink.h" #include "unittest/Unittest.h" #include "unittest/plugin/PluginMock.h" diff --git a/core/unittest/sender/SenderUnittest.cpp b/core/unittest/sender/SenderUnittest.cpp index 9d8a1a58f7..6c83796018 100644 --- a/core/unittest/sender/SenderUnittest.cpp +++ b/core/unittest/sender/SenderUnittest.cpp @@ -14,13 +14,13 @@ #include "unittest/Unittest.h" #include -#include "controller/EventDispatcher.h" -#include "config_manager/ConfigManager.h" +#include "file_server/EventDispatcher.h" +#include "file_server/ConfigManager.h" #include "app_config/AppConfig.h" -#include "reader/LogFileReader.h" -#include "event_handler/EventHandler.h" +#include "file_server/reader/LogFileReader.h" +#include "file_server/event_handler/EventHandler.h" #include 
"monitor/Monitor.h" -#include "event/Event.h" +#include "file_server/event/Event.h" #include "sender/Sender.h" #include #if defined(__linux__) @@ -39,13 +39,13 @@ #include #include #include -#include "log_pb/metric.pb.h" -#include "log_pb/sls_logs.pb.h" +#include "protobuf/sls/metric.pb.h" +#include "protobuf/sls/sls_logs.pb.h" #include "monitor/LogtailAlarm.h" #include "monitor/LogIntegrity.h" -#include "event_handler/LogInput.h" +#include "file_server/event_handler/LogInput.h" #include "common/FileEncryption.h" -#include "processor/daemon/LogProcess.h" +#include "runner/LogProcess.h" #include "common/WaitObject.h" #include "common/Lock.h" #include "common/MemoryBarrier.h" diff --git a/core/unittest/serializer/SLSSerializerUnittest.cpp b/core/unittest/serializer/SLSSerializerUnittest.cpp index e4ee89390f..a3695df4d1 100644 --- a/core/unittest/serializer/SLSSerializerUnittest.cpp +++ b/core/unittest/serializer/SLSSerializerUnittest.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "flusher/sls/FlusherSLS.h" -#include "serializer/SLSSerializer.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "pipeline/serializer/SLSSerializer.h" #include "unittest/Unittest.h" DECLARE_FLAG_INT32(max_send_log_group_size); diff --git a/core/unittest/spl/SplBenchmark.cpp b/core/unittest/spl/SplBenchmark.cpp index e51cf08419..8309af3ce4 100644 --- a/core/unittest/spl/SplBenchmark.cpp +++ b/core/unittest/spl/SplBenchmark.cpp @@ -16,12 +16,12 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "processor/ProcessorSPL.h" -#include "processor/ProcessorParseRegexNative.h" -#include "processor/ProcessorParseJsonNative.h" -#include "processor/ProcessorParseDelimiterNative.h" +#include "plugin/processor/ProcessorSPL.h" +#include "plugin/processor/ProcessorParseRegexNative.h" +#include "plugin/processor/ProcessorParseJsonNative.h" +#include "plugin/processor/ProcessorParseDelimiterNative.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" #include #include #include "common/TimeUtil.h" diff --git a/core/unittest/spl/SplUnittest.cpp b/core/unittest/spl/SplUnittest.cpp index f548fc94ac..94ce95c50d 100644 --- a/core/unittest/spl/SplUnittest.cpp +++ b/core/unittest/spl/SplUnittest.cpp @@ -16,9 +16,9 @@ #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "processor/ProcessorSPL.h" +#include "plugin/processor/ProcessorSPL.h" #include "models/LogEvent.h" -#include "plugin/instance/ProcessorInstance.h" +#include "pipeline/plugin/instance/ProcessorInstance.h" #include #include diff --git a/docker/Dockerfile_coverage b/docker/Dockerfile_coverage index d165cbfd38..e0fedd193d 100644 --- a/docker/Dockerfile_coverage +++ b/docker/Dockerfile_coverage @@ -30,4 +30,4 @@ RUN python3 -m pip install --upgrade pip RUN cp /usr/local/python3/bin/pip3 /usr/bin/pip3 && pip3 install gcovr==7.0 RUN cp /usr/local/python3/bin/gcovr /usr/bin/gcovr -CMD 
["bash", "-c", "gcovr --root . --lcov coverage.lcov --txt coverage.txt -e \".*sdk.*\" -e \".*observer.*\" -e \".*log_pb.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*fuse.*\" -e \".*go_pipeline.*\""] +CMD ["bash", "-c", "gcovr --root . --lcov coverage.lcov --txt coverage.txt -e \".*sdk.*\" -e \".*observer.*\" -e \".*lo.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*fuse.*\" -e \".*go_pipeline.*\""] diff --git a/docs/cn/SUMMARY.md b/docs/cn/SUMMARY.md index 2db8b9343e..d897d68602 100644 --- a/docs/cn/SUMMARY.md +++ b/docs/cn/SUMMARY.md @@ -115,6 +115,12 @@ * [Pulsar](plugins/flusher/flusher-pulsar.md) * [HTTP](plugins/flusher/flusher-http.md) * [Loki](plugins/flusher/loki.md) +* [扩展](plugins/extension/README.md) + * [BasicAuth鉴权](plugins/extension/ext-basicauth.md) + * [协议解码/反序列化](plugins/extension/ext-default-decoder.md) + * [协议编码/序列化](plugins/extension/ext-default-encoder.md) + * [数据筛选](plugins/extension/ext-groupinfo-filter.md) + * [请求熔断](plugins/extension/ext-request-breaker.md) ## 工作原理 diff --git a/docs/cn/awesome-ilogtail/ilogtail.md b/docs/cn/awesome-ilogtail/ilogtail.md index 6fbf544f49..f8947004a3 100644 --- a/docs/cn/awesome-ilogtail/ilogtail.md +++ b/docs/cn/awesome-ilogtail/ilogtail.md @@ -1,13 +1,23 @@ # 走近iLogtail社区版 +## 开源历程 +* 2024-08 [iLogtail 开源两周年:感恩遇见,畅想未来](https://mp.weixin.qq.com/s/RoFjoYlPLG1yOzDGc7vqIQ) +* 2023-02 [鲲鹏展翅凌云志:iLogtail社区2022年度开源报告](https://mp.weixin.qq.com/s/6luD7VUFd_0aaeyUBAShkw) +* 2024-02 [你好,iLogtail 2.0!](https://developer.aliyun.com/article/1441630) +* 2022-07 [千万级可观测数据采集器--iLogtail代码完整开源](https://mp.weixin.qq.com/s/Cam_OjPWhcEj77kqC0Q1SA) +* 2022-08 [iLogtail开源之路](https://mp.weixin.qq.com/s/5j5KJe9BmpZ1tdb-KCx_CQ) + +## 技术分享 +* 2024-08 [软件测试之道 -- 做一个有匠心的程序员!](https://mp.weixin.qq.com/s/ktEMOcXBopFiX9NIN3chHg) 看iLogtail如何做测试设计 +* 2024-08 [代码整洁之道--告别码农,做一个有思想的程序员!](https://mp.weixin.qq.com/s/tK0ZyRxKBGpCqIw16SPSxg) 看iLogtail如何追求代码整洁 +* 2024-04 
[破浪前行:iLogtail十年老架构如何浴火重生](https://developer.aliyun.com/article/1484844) +* 2024-04 [跟着iLogtail学习无锁化编程](https://developer.aliyun.com/article/1484342) +* 2023-08 [跟着iLogtail学习设计模式](https://mp.weixin.qq.com/s/gfy9DfvcTuCiBiLJdZLTKQ) +* 2022-12 [性能与可靠的超强碰撞:第三方测评开源日志采集器](https://mp.weixin.qq.com/s/8mCVk3gvXPOijTlcRjUR_w) +* 2022-12 [阿里十年技术沉淀|深度解析百PB级数据总线技术](https://mp.weixin.qq.com/s/NKoTCM5o-Rs_83Wakk9yCw) + +## 最佳实践 +* 2022-09 [零信任策略下K8s安全监控最佳实践](https://mp.weixin.qq.com/s/wYUNsGaWEnQZ0BVxsQORbA) + +## 其他 +* 2021-09 [您有一份来自iLogtail社区的礼物待查收](https://mp.weixin.qq.com/s/fyWwnKR1I4jgNiX30Wu-Vg) -* 2024-04-17 [破浪前行:iLogtail十年老架构如何浴火重生](https://developer.aliyun.com/article/1484844) -* 2024-04-17 [跟着iLogtail学习无锁化编程](https://developer.aliyun.com/article/1484342) -* 2024-02-21 [你好,iLogtail 2.0!](https://developer.aliyun.com/article/1441630) -* 2023-08-30 [跟着iLogtail学习设计模式](https://mp.weixin.qq.com/s/gfy9DfvcTuCiBiLJdZLTKQ) -* 2023-02-17 [鲲鹏展翅凌云志:iLogtail社区2022年度开源报告](https://mp.weixin.qq.com/s/6luD7VUFd_0aaeyUBAShkw) -* 2022-12-08 [性能与可靠的超强碰撞:第三方测评开源日志采集器](https://mp.weixin.qq.com/s/8mCVk3gvXPOijTlcRjUR_w) -* 2022-12-02 [阿里十年技术沉淀|深度解析百PB级数据总线技术](https://mp.weixin.qq.com/s/NKoTCM5o-Rs_83Wakk9yCw) -* 2022-09-09 [零信任策略下K8s安全监控最佳实践](https://mp.weixin.qq.com/s/wYUNsGaWEnQZ0BVxsQORbA) -* 2021-09-01 [您有一份来自iLogtail社区的礼物待查收](https://mp.weixin.qq.com/s/fyWwnKR1I4jgNiX30Wu-Vg) -* 2022-07-12 [千万级可观测数据采集器--iLogtail代码完整开源](https://mp.weixin.qq.com/s/Cam_OjPWhcEj77kqC0Q1SA) -* 2022-08-23 [iLogtail开源之路](https://mp.weixin.qq.com/s/5j5KJe9BmpZ1tdb-KCx_CQ) diff --git a/docs/cn/config-server/developer-guide.md b/docs/cn/config-server/developer-guide.md index cd0672ea29..da673e5790 100644 --- a/docs/cn/config-server/developer-guide.md +++ b/docs/cn/config-server/developer-guide.md @@ -5,7 +5,7 @@ 1. 在[管控协议讨论版](https://github.com/alibaba/ilogtail/discussions/404)提出修改意见 2. 意见通过后,修改[管控协议文件](https://github.com/alibaba/ilogtail/tree/main/config_server/protocol) 3. 
管控协议生成对应语言的版本,并进行代码适配 - * ilogtail C++ 部分:[agent.proto](https://github.com/alibaba/ilogtail/tree/main/core/config_server_pb) + * ilogtail C++ 部分:[agent.proto](https://github.com/alibaba/ilogtail/tree/main/core/protobuf/config_server) * ConfigServer Golang 部分:[agent.proto & user.proto](https://github.com/alibaba/ilogtail/tree/main/config_server/service/proto/v1) ## 开发 diff --git a/docs/cn/developer-guide/plugin-development/plugin-development-guide.md b/docs/cn/developer-guide/plugin-development/plugin-development-guide.md index 90f4d77468..c951636536 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-development-guide.md +++ b/docs/cn/developer-guide/plugin-development/plugin-development-guide.md @@ -25,6 +25,7 @@ ilogtail 插件的开发主要有以下步骤: * [如何开发Aggregator插件](./how-to-write-aggregator-plugins.md) * [如何开发Flusher插件](./how-to-write-flusher-plugins.md) * [如何开发Extension插件](./how-to-write-extension-plugins.md) +* [插件配置项基本原则](./principles-of-plugin-configuration.md) ## 文档撰写流程 diff --git a/docs/cn/developer-guide/plugin-development/plugin-self-monitor-guide.md b/docs/cn/developer-guide/plugin-development/plugin-self-monitor-guide.md index 13ba63c910..625e6b141f 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-self-monitor-guide.md +++ b/docs/cn/developer-guide/plugin-development/plugin-self-monitor-guide.md @@ -30,8 +30,8 @@ type ProcessorRateLimit struct { 创建指标时,需要将其注册到iLogtail Context 的 MetricRecord 中,以便 iLogtail 能够采集上报数据,在插件的Init方法中,调用context 的 GetMetricRecord()方法来获取MetricRecord,然后调用helper.New**XXX**MetricAndRegister函数去注册一个指标,例如: ```go metricsRecord := p.context.GetMetricRecord() -p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_limited", pluginName)) -p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginName)) +p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_limited", pluginType)) +p.processedMetric = 
helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) ``` 用户在声明一个Metric时可以还额外注入一些插件级别的静态Label,这是一个可选参数,例如flusher_http就把RemoteURL等配置进行上报: ```go diff --git a/docs/cn/developer-guide/plugin-development/principles-of-plugin-configuration.md b/docs/cn/developer-guide/plugin-development/principles-of-plugin-configuration.md new file mode 100644 index 0000000000..77a27bedb5 --- /dev/null +++ b/docs/cn/developer-guide/plugin-development/principles-of-plugin-configuration.md @@ -0,0 +1,39 @@ +# 插件配置项基本原则 + +## 必选参数的处理原则 + +1. key 拼写错误:等价于该 key 没有被配置,直接拒绝加载该参数 +2. value 格式错误、取值错误:直接拒绝加载该参数 + +## 可选参数的处理原则 + +1. key 拼写错误:等价于该 key 没有被配置,使用默认值 +2. value 格式错误、取值错误:使用默认值 + +## 过滤类参数的处理原则 + +### 过滤类参数出现错误的处理原则 + +#### 白名单 + +1. key 不存在或者 key 取值为空:不进行过滤 +2. value 有值但全部取值错误:使用默认值,一般不进行过滤 +3. value 有值且存在正确取值:使用正确取值作为白名单过滤参数 + +#### 黑名单 + +1. key 不存在或者 key 取值为空:不进行过滤 +2. value 有值但全部取值错误:使用默认值,一般不进行过滤 +3. value 有值且存在正确取值:使用正确取值作为黑名单过滤参数 + +### 过滤类参数的内外部作用关系 + +#### 白名单 + +1. 每一个列表过滤参数的内部:匹配到列表的每一个取值都被允许过滤通过,内部过滤条件之间是“或”的关系 +2. 多个列表过滤参数之间:只有在所有列表参数中都被允许过滤通过的取值才会最终通过,多个列表过滤参数之间是“且”的关系 + +#### 黑名单 + +1. 每一个列表过滤参数的内部:匹配到列表内的每一个取值都不允许被过滤通过,内部过滤条件之间是“或”的关系 +2. 多个列表过滤参数之间:在每个列表参数中不被允许过滤通过的取值最终都不会通过,多个列表过滤参数之间是“或”的关系 diff --git a/docs/cn/developer-guide/test/e2e-test-step.md b/docs/cn/developer-guide/test/e2e-test-step.md index 80bf348064..f9c2cc49a5 100644 --- a/docs/cn/developer-guide/test/e2e-test-step.md +++ b/docs/cn/developer-guide/test/e2e-test-step.md @@ -4,6 +4,8 @@ iLogtail提供了一个完整的E2E测试引擎,方便您快速开展集成测 ## 目前支持的测试行为 +可以参考 `test/engine/steps.go` 中的定义,目前支持的测试行为如下: + | 行为类型 | 模板 | 参数 | 说明 | | --- | --- | --- | --- | | Given | ^\{(\S+)\} environment$ | 环境类型 | 初始化远程测试环境 | @@ -59,7 +61,7 @@ return context.WithValue(ctx, key, value), nil ### 2. 
注册行为函数 -在`test/cases/core/main_test.go`中,您需要注册您的行为函数。注册函数的格式如下所示: +在`test/engine/steps.go`中,您需要注册您的行为函数。注册函数的格式如下所示: ```go func scenarioInitializer(ctx *godog.ScenarioContext) { diff --git a/docs/cn/developer-guide/test/unit-test.md b/docs/cn/developer-guide/test/unit-test.md index 4e3ddfa129..a660eb9799 100644 --- a/docs/cn/developer-guide/test/unit-test.md +++ b/docs/cn/developer-guide/test/unit-test.md @@ -19,7 +19,7 @@ cmake -DBUILD_LOGTAIL_UT=ON <其他编译参数> .. ```shell mkdir -p coverage-report # 生成详细的报告 -gcovr -r ./core --txt coverage-report/index.txt --html-details --html coverage-report/index.html -e ".*sdk.*" -e ".*observer.*" -e ".*log_pb.*" -e ".*unittest.*" -e ".*config_server.*" -e ".*fuse.*" -e ".*go_pipeline.*" +gcovr -r ./core --txt coverage-report/index.txt --html-details --html coverage-report/index.html -e ".*sdk.*" -e ".*observer.*" -e ".*protobuf.*" -e ".*unittest.*" -e ".*config_server.*" -e ".*fuse.*" -e ".*go_pipeline.*" # 生成本次commit diff的报告 python3 tools/coverage-diff/main.py coverage-report/index.txt ``` diff --git a/docs/cn/plugins/extension/README.md b/docs/cn/plugins/extension/README.md new file mode 100644 index 0000000000..bbf31e717b --- /dev/null +++ b/docs/cn/plugins/extension/README.md @@ -0,0 +1,3 @@ +# 扩展 + +扩展插件用于对其它插件能力的补充(e.g. 
鉴权、编解码、熔断、限流……) \ No newline at end of file diff --git a/docs/cn/plugins/extension/ext-default-encoder.md b/docs/cn/plugins/extension/ext-default-encoder.md new file mode 100644 index 0000000000..b5f6df9078 --- /dev/null +++ b/docs/cn/plugins/extension/ext-default-encoder.md @@ -0,0 +1,37 @@ +# DefaultEncoder Encoder扩展 + +## 简介 + +[ext_default_encoder](https://github.com/alibaba/ilogtail/blob/main/plugins/extension/default_encoder/default_encoder.go) +扩展,实现了 [Encoder](https://github.com/alibaba/ilogtail/blob/main/pkg/pipeline/extensions/encoder.go) 接口,可以用在 +`flusher_http` 等插件中用于序列化不同的协议数据。 + +## 版本 + +[Alpha](../stability-level.md) + +## 配置参数 + +| 参数 | 类型 | 是否必选 | 说明 | +|-------------|--------|------|-----------------------------------------------------------------------------------------------------------| +| Format | String | 是 | 具体的协议,[查看支持的具体协议列表](https://github.com/alibaba/ilogtail/blob/master/pkg/protocol/encoder/common/comon.go) | +| SeriesLimit | Int | 否 | 触发序列化时序切片的最大长度,默认 1000,仅针对 Format=prometheus 时有效 | + +## 样例 + +使用 `flusher_http` flusher 插件,配置发送 `prometheus` 协议数据。 + +```yaml +enable: true +flushers: +- Type: flusher_http + ... + Encoder: + Type: ext_default_encoder/prometheus + ... +... 
+extensions: +- Type: ext_default_encoder/prometheus + Format: 'prometheus' + SeriesLimit: 1024 +``` diff --git a/docs/cn/plugins/flusher/flusher-http.md b/docs/cn/plugins/flusher/flusher-http.md index b8502d19a6..8bd5a88fdc 100644 --- a/docs/cn/plugins/flusher/flusher-http.md +++ b/docs/cn/plugins/flusher/flusher-http.md @@ -10,27 +10,33 @@ ## 配置参数 -| 参数 | 类型 | 是否必选 | 说明 | -|------------------------------|--------------------| -------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Type | String | 是 | 插件类型,固定为`flusher_http` | -| RemoteURL | String | 是 | 要发送到的URL地址,示例:`http://localhost:8086/write` | -| Headers | Map | 否 | 发送时附加的http请求header,如可添加 Authorization、Content-Type等信息,支持动态变量写法,如`{"x-db":"%{tag.db}"}`

v2版本支持从Group的Metadata或者Group.Tags中获取动态变量,如`{"x-db":"%{metadata.db}"}`或者`{"x-db":"%{tag.db}"}`

| -| Query | Map | 否 | 发送时附加到url上的query参数,支持动态变量写法,如`{"db":"%{tag.db}"}`

v2版本支持从Group的Metadata或者Group.Tags中获取动态变量,如`{"db":"%{metadata.db}"}`或者`{"db":"%{tag.db}"}`

| -| Timeout | String | 否 | 请求的超时时间,默认 `60s` | -| Retry.Enable | Boolean | 否 | 是否开启失败重试,默认为 `true` | -| Retry.MaxRetryTimes | Int | 否 | 最大重试次数,默认为 `3` | -| Retry.InitialDelay | String | 否 | 首次重试时间间隔,默认为 `1s`,重试间隔以会2的倍数递增 | -| Retry.MaxDelay | String | 否 | 最大重试时间间隔,默认为 `30s` | -| Convert | Struct | 否 | ilogtail数据转换协议配置 | -| Convert.Protocol | String | 否 | ilogtail数据转换协议,可选值:`custom_single`,`influxdb`, `jsonline`。默认值:`custom_single`

v2版本可选值:`raw`

| -| Convert.Encoding | String | 否 | ilogtail flusher数据转换编码,可选值:`json`, `custom`,默认值:`json` | -| Convert.Separator | String | 否 | ilogtail数据转换时,PipelineGroupEvents中多个Events之间拼接使用的分隔符。如`\n`。若不设置,则默认不拼接Events,即每个Event作为独立请求向后发送。 默认值为空。

当前仅在`Convert.Protocol: raw`有效。

| -| Convert.IgnoreUnExpectedData | Boolean | 否 | ilogtail数据转换时,遇到非预期的数据的行为,true 跳过,false 报错。默认值 true | -| Convert.TagFieldsRename | Map | 否 | 对日志中tags中的json字段重命名 | -| Convert.ProtocolFieldsRename | Map | 否 | ilogtail日志协议字段重命名,可当前可重命名的字段:`contents`,`tags`和`time` | -| Concurrency | Int | 否 | 向url发起请求的并发数,默认为`1` | -| QueueCapacity | Int | 否 | 内部channel的缓存大小,默认为1024 -| AsyncIntercept | Boolean | 否 | 异步过滤数据,默认为否 +| 参数 | 类型 | 是否必选 | 说明 | +|------------------------------|--------------------|------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Type | String | 是 | 插件类型,固定为`flusher_http` | +| RemoteURL | String | 是 | 要发送到的URL地址,示例:`http://localhost:8086/write` | +| Headers | Map | 否 | 发送时附加的http请求header,如可添加 Authorization、Content-Type等信息,支持动态变量写法,如`{"x-db":"%{tag.db}"}`

v2版本支持从Group的Metadata或者Group.Tags中获取动态变量,如`{"x-db":"%{metadata.db}"}`或者`{"x-db":"%{tag.db}"}`

| +| Query | Map | 否 | 发送时附加到url上的query参数,支持动态变量写法,如`{"db":"%{tag.db}"}`

v2版本支持从Group的Metadata或者Group.Tags中获取动态变量,如`{"db":"%{metadata.db}"}`或者`{"db":"%{tag.db}"}`

| +| Timeout | String | 否 | 请求的超时时间,默认 `60s` | +| Retry.Enable | Boolean | 否 | 是否开启失败重试,默认为 `true` | +| Retry.MaxRetryTimes | Int | 否 | 最大重试次数,默认为 `3` | +| Retry.InitialDelay | String | 否 | 首次重试时间间隔,默认为 `1s`,重试间隔以会2的倍数递增 | +| Retry.MaxDelay | String | 否 | 最大重试时间间隔,默认为 `30s` | +| Convert | Struct | 否 | ilogtail数据转换协议配置 | +| Convert.Protocol | String | 否 | ilogtail数据转换协议,可选值:`custom_single`,`influxdb`, `jsonline`。默认值:`custom_single`

v2版本可选值:`raw`

| +| Convert.Encoding | String | 否 | ilogtail flusher数据转换编码,可选值:`json`, `custom`,默认值:`json` | +| Convert.Separator | String | 否 | ilogtail数据转换时,PipelineGroupEvents中多个Events之间拼接使用的分隔符。如`\n`。若不设置,则默认不拼接Events,即每个Event作为独立请求向后发送。 默认值为空。

当前仅在`Convert.Protocol: raw`有效。

| +| Convert.IgnoreUnExpectedData | Boolean | 否 | ilogtail数据转换时,遇到非预期的数据的行为,true 跳过,false 报错。默认值 true | +| Convert.TagFieldsRename | Map | 否 | 对日志中tags中的json字段重命名 | +| Convert.ProtocolFieldsRename | Map | 否 | ilogtail日志协议字段重命名,可当前可重命名的字段:`contents`,`tags`和`time` | +| Concurrency | Int | 否 | 向url发起请求的并发数,默认为`1` | +| MaxConnsPerHost | Int | 否 | 每个host上的最大HTTP连接数(包含了拨号阶段的、活跃的、空闲的),默认`0`,表示不限制

当其值大于http.DefaultTransport.(*http.Transport).MaxConnsPerHost时(当前是`0`),会采用该值 | +| MaxIdleConnsPerHost | Int | 否 | 每个host上的最大空闲的HTTP连接数,默认`0`,表示不限制

当其值大于http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost时(当前是`0`),会采用该值 | +| IdleConnTimeout | String | 否 | HTTP连接在关闭前保持闲置状态的最长时间,默认`90s`

当其值大于http.DefaultTransport.(*http.Transport).IdleConnTimeout时(当前是`90s`),会采用该值 | +| WriteBufferSize | Int | 否 | 写缓冲区的大小,不填不会给http.DefaultTransport.(*http.Transport).WriteBufferSize赋值,此时采用默认的`4KB`

当其值大于0时,会采用该值 | +| QueueCapacity | Int | 否 | 内部channel的缓存大小,默认为1024 +| AsyncIntercept | Boolean | 否 | 异步过滤数据,默认为否 +| DropEventWhenQueueFull | Boolean | 否 | 当队列满时是否丢弃数据,否则需要等待,默认为不丢弃 | +| Compression | string | 否 | 压缩策略,目前支持gzip和snappy,默认不开启 | ## 样例 @@ -79,3 +85,30 @@ flushers: 需要注意的是,由于使用`jsonline`协议(会将日志的content和tag打平),所以仅支持使用`json`格式进行提交。 由于`jsonline`默认会批量提交日志,所以建议调低`QueueCapacity`,避免在日志量较大的情况下,发生内存占用过多或OOM的问题。 + +采集Prometheus指标,并将指标以Prometheus协议发送到`PROMETHEUS_REMOTEWRITE_ADDRESS`。 +这里用到了`ext_default_encoder`插件,该插件可以配置使用Prometheus Encoder,从而支持将采集到的数据转换为Prometheus协议。 +```yaml +enable: true +global: + StructureType: v2 +inputs: +- Type: service_prometheus + ConfigFilePath: '/etc/prometheus/prometheus.yml' +flushers: +- Type: flusher_http + RemoteURL: 'http://PROMETHEUS_REMOTEWRITE_ADDRESS/api/v1/write' + Concurrency: 10 + QueueCapacity: 4096 + DropEventWhenQueueFull: true + Encoder: + Type: ext_default_encoder + Format: 'prometheus' + SeriesLimit: 1024 + Authenticator: + Type: ext_basicauth +extensions: +- Type: ext_basicauth + Username: 'YOUR_USERNAME' + Password: 'YOUR_PASSWORD' +``` \ No newline at end of file diff --git a/docs/cn/plugins/input/input-ebpf-file-security.md b/docs/cn/plugins/input/input-ebpf-file-security.md index 2cf1bdeac2..9c3045282f 100644 --- a/docs/cn/plugins/input/input-ebpf-file-security.md +++ b/docs/cn/plugins/input/input-ebpf-file-security.md @@ -1,8 +1,8 @@ -# iuput_ebpf_file_security 插件 +# input_ebpf_file_security 插件 ## 简介 -`iuput_ebpf_file_security`插件可以实现利用ebpf探针采集文件安全相关动作。 +`input_ebpf_file_security`插件可以实现利用ebpf探针采集文件安全相关动作。 ## 版本 @@ -12,12 +12,10 @@ | **参数** | **类型** | **是否必填** | **默认值** | **说明** | | --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为iuput\_ebpf\_file\_security | -| ProbeConfig | \[object\] | 是 | / | 插件配置参数列表 | -| ProbeConfig.CallName | \[string\] | 否 | 空 | 系统调用函数 | -| ProbeConfig.FilePathFilter | \[object\] | 是 | / | 过滤参数 | -| ProbeConfig.FilePathFilter.FilePath | string | 是 | / | 文件路径 | -| 
ProbeConfig.FilePathFilter.FileName | string | 否 | 空 | 文件名。不填 FileName 代表采集对应的 FilePath 下所有文件 | +| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_file\_security | +| ProbeConfig | \[object\] | 否 | ProbeConfig 默认包含一个 Option,其中包含一个默认取全部值的 CallNameFilter,其他 Filter 默认为空 | ProbeConfig 可以包含多个 Option, Option 内部有多个 Filter,Filter 内部是或的关系,Filter 之间是且的关系,Option 之间是或的关系 | +| ProbeConfig[xx].CallNameFilter | \[string\] | 否 | 该插件支持的所有 callname: [ security_file_permission security_mmap_file security_path_truncate ] | 内核挂载点过滤器,按照白名单模式运行,不填表示配置该插件所支持的所有挂载点 | +| ProbeConfig[xx].FilePathFilter | \[string\] | 否 | 空 | 文件路径过滤器,按照白名单模式运行,不填表示不进行过滤 | ## 样例 @@ -36,17 +34,15 @@ enable: true inputs: - Type: input_ebpf_fileprobe_security ProbeConfig: - - CallName: + - CallNameFilter: - "security_file_permission" FilePathFilter: - - FilePath: "/etc/" - FileName: "passwd" - - FilePath: "/lib" - - CallName: + - "/etc/passwd" + - "/lib" + - CallNameFilter: - "security_path_truncate" FilePathFilter: - - FilePath: "/etc/" - FileName: "passwd" + - "/etc/passwd" flushers: - Type: flusher_stdout OnlyStdout: true diff --git a/docs/cn/plugins/input/input-ebpf-network-observer.md b/docs/cn/plugins/input/input-ebpf-network-observer.md index b1fbfded03..f3c9f845aa 100644 --- a/docs/cn/plugins/input/input-ebpf-network-observer.md +++ b/docs/cn/plugins/input/input-ebpf-network-observer.md @@ -1,8 +1,8 @@ -# iuput_ebpf_network_observer 插件 +# input_ebpf_network_observer 插件 ## 简介 -`iuput_ebpf_network_observer`插件可以实现利用ebpf探针采集网络可观测数据。 +`input_ebpf_network_observer`插件可以实现利用ebpf探针采集网络可观测数据。 ## 版本 @@ -12,8 +12,11 @@ | **参数** | **类型** | **是否必填** | **默认值** | **说明** | | --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为iuput\_ebpf\_network\_observer | +| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_network\_observer | | ProbeConfig | object | 是 | / | 插件配置参数列表 | +| ProbeConfig.EnableLog | bool | 否 | true | 是否开启日志上报 | +| ProbeConfig.EnableSpan | bool | 否 | false | 是否开启跨度上报 | +| ProbeConfig.EnableMetric | 
bool | 否 | false | 是否开启指标上报 | | ProbeConfig.EnableProtocols | \[string\] | 否 | 空 | 允许的协议类型 | | ProbeConfig.DisableProtocolParse | bool | 否 | false | TODO | | ProbeConfig.DisableConnStats | bool | 否 | false | TODO | @@ -36,6 +39,9 @@ enable: true inputs: - Type: input_ebpf_sockettraceprobe_observer ProbeConfig: + EnableLog: true + EnableMetric: false + EnableSpan: false EnableProtocols: - "http" DisableConnStats: false diff --git a/docs/cn/plugins/input/input-ebpf-network-security.md b/docs/cn/plugins/input/input-ebpf-network-security.md index 472386769c..64d95d31fa 100644 --- a/docs/cn/plugins/input/input-ebpf-network-security.md +++ b/docs/cn/plugins/input/input-ebpf-network-security.md @@ -12,18 +12,18 @@ | **参数** | **类型** | **是否必填** | **默认值** | **说明** | | --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为iuput\_ebpf\_network\_security | -| ProbeConfig | \[object\] | 是 | / | 插件配置参数列表 | -| ProbeConfig.CallName | \[string\] | 否 | 空 | 系统调用函数 | -| ProbeConfig.AddrFilter | object | 是 | / | 过滤参数 | -| ProbeConfig.AddrFilter.DestAddrList | \[string\] | 否 | 空 | 目的IP地址 | -| ProbeConfig.AddrFilter.DestPortList | \[string\] | 否 | 空 | 目的端口 | -| ProbeConfig.AddrFilter.DestAddrBlackList | \[string\] | 否 | 空 | 目的IP地址黑名单 | -| ProbeConfig.AddrFilter.DestPortBlackList | \[string\] | 否 | 空 | 目的端口黑名单 | -| ProbeConfig.AddrFilter.SourceAddrList | \[string\] | 否 | 空 | 源IP地址 | -| ProbeConfig.AddrFilter.SourcePortList | \[string\] | 否 | 空 | 源端口 | -| ProbeConfig.AddrFilter.SourceAddrBlackList | \[string\] | 否 | 空 | 源IP地址黑名单 | -| ProbeConfig.AddrFilter.SourcePortBlackList | \[string\] | 否 | 空 | 源端口黑名单 | +| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_network\_security | +| ProbeConfig | \[object\] | 否 | ProbeConfig 默认包含一个 Option,其中包含一个默认取全部值的 CallNameFilter,其他 Filter 默认为空 | ProbeConfig 可以包含多个 Option, Option 内部有多个 Filter,Filter 内部是或的关系,Filter 之间是且的关系,Option 之间是或的关系 | +| ProbeConfig[xx].CallNameFilter | \[string\] | 否 | 该插件支持的所有 callname: [ tcp_connect tcp_close tcp_sendmsg ] | 
内核挂载点过滤器,按照白名单模式运行,不填表示配置该插件所支持的所有挂载点 | +| ProbeConfig[xx].AddrFilter | object | 否 | / | 网络地址过滤器 | +| ProbeConfig[xx].AddrFilter.DestAddrList | \[string\] | 否 | 空 | 目的IP地址白名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.DestPortList | \[string\] | 否 | 空 | 目的端口白名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.DestAddrBlackList | \[string\] | 否 | 空 | 目的IP地址黑名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.DestPortBlackList | \[string\] | 否 | 空 | 目的端口黑名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.SourceAddrList | \[string\] | 否 | 空 | 源IP地址白名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.SourcePortList | \[string\] | 否 | 空 | 源端口白名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.SourceAddrBlackList | \[string\] | 否 | 空 | 源IP地址黑名单,不填表示不进行过滤 | +| ProbeConfig[xx].AddrFilter.SourcePortBlackList | \[string\] | 否 | 空 | 源端口黑名单,不填表示不进行过滤 | ## 样例 @@ -42,7 +42,7 @@ enable: true inputs: - Type: input_ebpf_sockettraceprobe_security ProbeConfig: - - CallName: + - CallNameFilter: - "tcp_connect" - "tcp_close" AddrFilter: @@ -55,7 +55,7 @@ inputs: - "127.0.0.1/8" SourcePortBlackList: - 9300 - - CallName: + - CallNameFilter: - "tcp_sendmsg" AddrFilter: DestAddrList: diff --git a/docs/cn/plugins/input/input-ebpf-process-security.md b/docs/cn/plugins/input/input-ebpf-process-security.md index 0cf6336a41..5421eac161 100644 --- a/docs/cn/plugins/input/input-ebpf-process-security.md +++ b/docs/cn/plugins/input/input-ebpf-process-security.md @@ -1,8 +1,8 @@ -# iuput_ebpf_process_security 插件 +# input_ebpf_process_security 插件 ## 简介 -`iuput_ebpf_process_security`插件可以实现利用ebpf探针采集进程安全相关动作。 +`input_ebpf_process_security`插件可以实现利用ebpf探针采集进程安全相关动作。 ## 版本 @@ -12,13 +12,9 @@ | **参数** | **类型** | **是否必填** | **默认值** | **说明** | | --- | --- | --- | --- | --- | -| Type | string | 是 | / | 插件类型。固定为iuput\_ebpf\_process\_security | -| ProbeConfig | \[object\] | 是 | / | 插件配置参数列表 | -| ProbeConfig.CallName | \[string\] | 否 | 空 | 系统调用函数 | -| ProbeConfig.NamespaceFilter | object | 否 | 空 | 命名空间 | -| ProbeConfig.NamespaceBlackFilter | 
object | 否 | 空 | 命名空间 | -| ProbeConfig.Namespace\[Black\]Filter.NamespaceType | string | 是 | / | 命名空间类型 \[范围:Uts, Ipc, Mnt, Pid, PidForChildren, Net, Cgroup, User, Time, TimeForChildren\] | -| ProbeConfig.Namespace\[Black\]Filter.ValueList | \[string\] | 是 | / | 特定命名空间类型对应的取值列表 | +| Type | string | 是 | / | 插件类型。固定为input\_ebpf\_process\_security | +| ProbeConfig | \[object\] | 否 | ProbeConfig 默认包含一个 Option,其中包含一个默认取全部值的 CallNameFilter,其他 Filter 默认为空 | ProbeConfig 可以包含多个 Option, Option 内部有多个 Filter,Filter 内部是或的关系,Filter 之间是且的关系,Option 之间是或的关系 | +| ProbeConfig[xx].CallNameFilter | \[string\] | 否 | 该插件支持的所有 callname: [ sys_enter_execve sys_enter_clone disassociate_ctty acct_process wake_up_new_task ] | 内核挂载点过滤器,按照白名单模式运行,不填表示配置该插件所支持的所有挂载点 | ## 样例 @@ -36,14 +32,6 @@ TODO enable: true inputs: - Type: input_ebpf_processprobe_security - ProbeConfig: - NamespaceFilter: - - NamespaceType: "Pid" - ValueList: - - "4026531833" - - NamespaceType: "Mnt" - ValueList: - - "4026531834" flushers: - Type: flusher_stdout OnlyStdout: true diff --git a/docs/cn/plugins/input/input-file.md b/docs/cn/plugins/input/input-file.md index afdc3321a1..099449a35d 100644 --- a/docs/cn/plugins/input/input-file.md +++ b/docs/cn/plugins/input/input-file.md @@ -2,7 +2,7 @@ ## 简介 -`iuput_file`插件可以实现从文本文件中采集日志。采集的日志内容将会保存在事件的`content`字段中。 +`input_file`插件可以实现从文本文件中采集日志。采集的日志内容将会保存在事件的`content`字段中。 ## 版本 diff --git a/docs/cn/plugins/overview.md b/docs/cn/plugins/overview.md index f462a05692..c0ccf6a220 100644 --- a/docs/cn/plugins/overview.md +++ b/docs/cn/plugins/overview.md @@ -115,3 +115,9 @@ | 名称 | 提供方 | 简介 | |----------------------------------------------------------------------------|-------------------------------------------------|-----------------------------| | [`ext_default_decoder`](extension/ext-default-decoder.md)
默认的decoder扩展 | 社区
[`snakorse`](https://github.com/snakorse) | 将内置支持的Format以Decoder扩展的形式封装 | + +### Encoder + +| 名称 | 提供方 | 简介 | +|----------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| +| [`ext_default_encoder`](extension/ext-default-encoder.md)
默认的encoder扩展 | 社区
[`yuanshuai.1900`](https://github.com/aiops1900) | 将内置支持的Format以Encoder扩展的形式封装 | diff --git a/go.mod b/go.mod index dac61b2cf6..0febacd384 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/ClickHouse/clickhouse-go/v2 v2.6.0 github.com/IBM/sarama v1.42.2 - github.com/VictoriaMetrics/VictoriaMetrics v1.83.1 + github.com/VictoriaMetrics/VictoriaMetrics v1.83.0 github.com/alibaba/ilogtail/pkg v0.0.0 github.com/apache/pulsar-client-go v0.10.0 github.com/buger/jsonparser v1.1.1 @@ -24,6 +24,7 @@ require ( github.com/go-ping/ping v0.0.0-20211130115550-779d1e919534 github.com/go-sql-driver/mysql v1.7.1 github.com/gogo/protobuf v1.3.2 + github.com/golang/snappy v0.0.4 github.com/gosnmp/gosnmp v1.34.0 github.com/grafana/loki-client-go v0.0.0-20230116142646-e7494d0ef70c github.com/hashicorp/golang-lru/v2 v2.0.2 @@ -44,7 +45,7 @@ require ( github.com/prometheus/client_golang v1.14.0 github.com/prometheus/common v0.42.0 github.com/prometheus/procfs v0.8.0 - github.com/pyroscope-io/pyroscope v0.37.2 + github.com/pyroscope-io/pyroscope v1.5.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sirupsen/logrus v1.8.1 github.com/smartystreets/goconvey v1.7.2 @@ -143,7 +144,6 @@ require ( github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/golang/snappy v0.0.4 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect diff --git a/licenses/LICENSE_OF_TESTENGINE_DEPENDENCIES.md b/licenses/LICENSE_OF_TESTENGINE_DEPENDENCIES.md index 00830637ad..b255f4c012 100644 --- a/licenses/LICENSE_OF_TESTENGINE_DEPENDENCIES.md +++ b/licenses/LICENSE_OF_TESTENGINE_DEPENDENCIES.md @@ -104,9 +104,17 @@ When distributed in a binary form, Logtailplugin Test Engine may contain portion - 
[github.com/emicklei/go-restful](https://pkg.go.dev/github.com/emicklei/go-restful?tab=licenses) - [github.com/josharian/intern](https://pkg.go.dev/github.com/josharian/intern?tab=licenses) - [github.com/mailru/easyjson](https://pkg.go.dev/github.com/mailru/easyjson?tab=licenses) +- [github.com/cucumber/gherkin/go](https://pkg.go.dev/github.com/cucumber/gherkin/go?tab=licenses) +- [github.com/cucumber/godog](https://pkg.go.dev/github.com/cucumber/godog?tab=licenses) +- [github.com/cucumber/messages/go](https://pkg.go.dev/github.com/cucumber/messages/go?tab=licenses) +- [github.com/gofrs/uuid](https://pkg.go.dev/github.com/gofrs/uuid?tab=licenses) ## Mozilla Public License 2.0 licenses +- [github.com/hashicorp/go-immutable-radix](https://pkg.go.dev/github.com/hashicorp/go-immutable-radix?tab=licenses) +- [github.com/hashicorp/go-memdb](https://pkg.go.dev/github.com/hashicorp/go-memdb?tab=licenses) +- [github.com/hashicorp/golang-lru](https://pkg.go.dev/github.com/hashicorp/golang-lru?tab=licenses) + ## LGPL v3 licenses ## EPL V2 licenses diff --git a/pkg/doc/generate.go b/pkg/doc/generate.go index 801c0ca461..0832251ca4 100644 --- a/pkg/doc/generate.go +++ b/pkg/doc/generate.go @@ -68,8 +68,8 @@ func Generate(path string) { _ = os.WriteFile(fileName, []byte(str), 0600) } -func generatePluginDoc(fileName, pluginName string, doc Doc) { - str := topLevel + pluginName + lf +func generatePluginDoc(fileName, pluginType string, doc Doc) { + str := topLevel + pluginType + lf str += secondLevel + "Description" + lf str += doc.Description() + lf str += secondLevel + "Config" + lf diff --git a/pkg/go.mod b/pkg/go.mod index 0de999b532..5e9313b879 100644 --- a/pkg/go.mod +++ b/pkg/go.mod @@ -4,6 +4,7 @@ go 1.19 require ( github.com/Microsoft/go-winio v0.5.2 + github.com/VictoriaMetrics/VictoriaMetrics v1.83.0 github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash/v2 v2.2.0 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 @@ -18,12 +19,13 @@ require ( 
github.com/influxdata/telegraf v1.20.0 github.com/json-iterator/go v1.1.12 github.com/mailru/easyjson v0.7.7 + github.com/mitchellh/mapstructure v1.4.2 github.com/narqo/go-dogstatsd-parser v0.2.0 github.com/pierrec/lz4 v2.6.1+incompatible github.com/prometheus/common v0.42.0 github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 github.com/pyroscope-io/jfr-parser v0.6.0 - github.com/pyroscope-io/pyroscope v0.0.0-00010101000000-000000000000 + github.com/pyroscope-io/pyroscope v1.5.0 github.com/richardartoul/molecule v1.0.0 github.com/smartystreets/goconvey v1.7.2 github.com/stretchr/testify v1.8.2 diff --git a/pkg/go.sum b/pkg/go.sum index 2b45645951..c8cced1fc8 100644 --- a/pkg/go.sum +++ b/pkg/go.sum @@ -783,6 +783,8 @@ github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iLogtail/VictoriaMetrics v1.83.4-ilogtail h1:LRDJt9eUKKhHdwPJRbC6tgtiMs/0XTjlCz1dl2pzRt0= +github.com/iLogtail/VictoriaMetrics v1.83.4-ilogtail/go.mod h1:JagjwAO58g1WNpyr6x/lrQqMTf99d/WU/yxjADxBz8E= github.com/iLogtail/handy v0.0.0-20230327021402-6a47ec586270/go.mod h1:6ai2R0qBm3xL13e10jwvyIf91Spxvo/yREZE9KOz7so= github.com/iLogtail/jfr-parser v0.6.0 h1:dNaQ0Ng2BLE5uxrhUQwtx1q7O9LIQFpMthl3SV326AU= github.com/iLogtail/jfr-parser v0.6.0/go.mod h1:ZMcbJjfDkOwElEK8CvUJbpetztRWRXszCmf5WU0erV8= @@ -944,6 +946,8 @@ github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
+github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= diff --git a/pkg/pipeline/extensions/encoder.go b/pkg/pipeline/extensions/encoder.go new file mode 100644 index 0000000000..85e5fcb6a5 --- /dev/null +++ b/pkg/pipeline/extensions/encoder.go @@ -0,0 +1,53 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extensions + +import ( + "github.com/alibaba/ilogtail/pkg/models" + "github.com/alibaba/ilogtail/pkg/pipeline" + "github.com/alibaba/ilogtail/pkg/protocol" +) + +// Encoder encodes data of iLogtail data models into bytes. +// Different drivers with different encoding protocols implement Encoder interface. +// +// drivers: raw, influxdb, prometheus, sls, ... +type Encoder interface { + EncoderV1 + EncoderV2 +} + +// EncoderV1 supports v1 pipeline plugin interface, +// encodes data of v1 model into bytes. +// +// drivers: sls, influxdb, ... 
+type EncoderV1 interface { + EncodeV1(*protocol.LogGroup) ([][]byte, error) + EncodeBatchV1([]*protocol.LogGroup) ([][]byte, error) +} + +// EncoderV2 supports v2 pipeline plugin interface, +// encodes data of v2 model into bytes. +// +// drivers: raw, influxdb, prometheus, ... +type EncoderV2 interface { + EncodeV2(*models.PipelineGroupEvents) ([][]byte, error) + EncodeBatchV2([]*models.PipelineGroupEvents) ([][]byte, error) +} + +type EncoderExtension interface { + Encoder + pipeline.Extension +} diff --git a/pkg/protocol/encoder/common/common.go b/pkg/protocol/encoder/common/common.go new file mode 100644 index 0000000000..4caf8109df --- /dev/null +++ b/pkg/protocol/encoder/common/common.go @@ -0,0 +1,19 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +const ( + ProtocolPrometheus = "prometheus" +) diff --git a/pkg/protocol/encoder/encoder.go b/pkg/protocol/encoder/encoder.go new file mode 100644 index 0000000000..81fc0667ce --- /dev/null +++ b/pkg/protocol/encoder/encoder.go @@ -0,0 +1,40 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoder + +import ( + "fmt" + "strings" + + "github.com/mitchellh/mapstructure" + + "github.com/alibaba/ilogtail/pkg/pipeline/extensions" + "github.com/alibaba/ilogtail/pkg/protocol/encoder/common" + "github.com/alibaba/ilogtail/pkg/protocol/encoder/prometheus" +) + +func NewEncoder(format string, options map[string]any) (extensions.Encoder, error) { + switch strings.TrimSpace(strings.ToLower(format)) { + case common.ProtocolPrometheus: + var opt prometheus.Option + if err := mapstructure.Decode(options, &opt); err != nil { + return nil, err + } + return prometheus.NewPromEncoder(opt.SeriesLimit), nil + + default: + return nil, fmt.Errorf("not supported encode format: %s", format) + } +} diff --git a/pkg/protocol/encoder/prometheus/encoder_prometheus.go b/pkg/protocol/encoder/prometheus/encoder_prometheus.go new file mode 100644 index 0000000000..4be1ef5792 --- /dev/null +++ b/pkg/protocol/encoder/prometheus/encoder_prometheus.go @@ -0,0 +1,126 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "context" + "errors" + + "github.com/alibaba/ilogtail/pkg/logger" + "github.com/alibaba/ilogtail/pkg/models" + "github.com/alibaba/ilogtail/pkg/pipeline/extensions" + "github.com/alibaba/ilogtail/pkg/protocol" +) + +const defaultSeriesLimit = 1000 + +var errNilOrZeroGroupEvents = errors.New("nil or zero group events") + +type Option struct { + SeriesLimit int // config for prometheus encoder +} + +func NewPromEncoder(seriesLimit int) extensions.Encoder { + return newPromEncoder(seriesLimit) +} + +type Encoder struct { + SeriesLimit int +} + +func newPromEncoder(seriesLimit int) *Encoder { + if seriesLimit <= 0 { + seriesLimit = defaultSeriesLimit + } + + return &Encoder{ + SeriesLimit: seriesLimit, + } +} + +func (p *Encoder) EncodeV1(logGroups *protocol.LogGroup) ([][]byte, error) { + // TODO implement me + return nil, nil +} + +func (p *Encoder) EncodeBatchV1(logGroups []*protocol.LogGroup) ([][]byte, error) { + // TODO implement me + return nil, nil +} + +func (p *Encoder) EncodeV2(groupEvents *models.PipelineGroupEvents) ([][]byte, error) { + if groupEvents == nil || len(groupEvents.Events) == 0 { + return nil, errNilOrZeroGroupEvents + } + + var res [][]byte + + wr := getWriteRequest(p.SeriesLimit) + defer putWriteRequest(wr) + + for _, event := range groupEvents.Events { + if event == nil { + logger.Debugf(context.Background(), "nil event") + continue + } + + if event.GetType() != models.EventTypeMetric { + logger.Debugf(context.Background(), "event type (%s) not metric", event.GetName()) + continue + } + + metricEvent, ok := event.(*models.Metric) + if !ok { + logger.Debugf(context.Background(), "assert metric event type (%s) failed", event.GetName()) + continue + } + + wr.Timeseries = append(wr.Timeseries, genPromRemoteWriteTimeseries(metricEvent)) + if len(wr.Timeseries) >= p.SeriesLimit { + res = append(res, marshalBatchTimeseriesData(wr)) + wr.Timeseries = wr.Timeseries[:0] + } + } + + if len(wr.Timeseries) > 0 { + res 
= append(res, marshalBatchTimeseriesData(wr)) + wr.Timeseries = wr.Timeseries[:0] + } + + return res, nil +} + +func (p *Encoder) EncodeBatchV2(groupEventsSlice []*models.PipelineGroupEvents) ([][]byte, error) { + if len(groupEventsSlice) == 0 { + return nil, errNilOrZeroGroupEvents + } + + var res [][]byte + + for _, groupEvents := range groupEventsSlice { + bytes, err := p.EncodeV2(groupEvents) + if err != nil { + continue + } + + res = append(res, bytes...) + } + + if res == nil { + return nil, errNilOrZeroGroupEvents + } + + return res, nil +} diff --git a/pkg/protocol/encoder/prometheus/encoder_prometheus_test.go b/pkg/protocol/encoder/prometheus/encoder_prometheus_test.go new file mode 100644 index 0000000000..2091cf5d1d --- /dev/null +++ b/pkg/protocol/encoder/prometheus/encoder_prometheus_test.go @@ -0,0 +1,505 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "strconv" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + + "github.com/alibaba/ilogtail/pkg/models" +) + +// 场景:性能测试,确定性能基线(UT粒度) +// 因子:所有 Event type 均为 models.EventTypeMetric +// 因子:所有 PipelineEvent interface 的实现均为 *models.Metric +// 预期:EncodeV2 和 EncodeBatchV2 性能相当(实现上 EncodeBatchV2 会循环调用 EncodeV2) +// Benchmark结果(每次在具体数值上可能会有差异,但数量级相同): +// goos: darwin +// goarch: arm64 +// pkg: github.com/alibaba/ilogtail/pkg/protocol/encoder/prometheus +// BenchmarkV2Encode +// BenchmarkV2Encode/EncodeV2 +// BenchmarkV2Encode/EncodeV2-12 685 1655657 ns/op +// BenchmarkV2Encode/BatchEncodeV2 +// BenchmarkV2Encode/BatchEncodeV2-12 716 1639491 ns/op +// PASS +func BenchmarkV2Encode(b *testing.B) { + // given + p := NewPromEncoder(19) + groupEventsSlice := genNormalPipelineGroupEventsSlice(100) + want := append([]*models.PipelineGroupEvents(nil), groupEventsSlice...) 
+ + b.Run("EncodeV2", func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + for _, groupEvents := range groupEventsSlice { + p.EncodeV2(groupEvents) + } + } + }) + assert.Equal(b, want, groupEventsSlice) + + b.Run("BatchEncodeV2", func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + p.EncodeBatchV2(groupEventsSlice) + } + }) + assert.Equal(b, want, groupEventsSlice) +} + +// 场景:V2 Encode接口功能测试 +// 说明:EncodeBatchV2 内部会调用 EncodeV2,所以也同时测试了 EncodeV2 的正常逻辑的功能 +// 因子:所有 Event type 均为 models.EventTypeMetric +// 因子:所有 PipelineEvent interface 的实现均为「正常的*models.Metric」(而不是new(models.Metric)), +// 具体区别在于正常的*models.Metric,其中的Tags、Value等都不是nil(如果为nil,会触发序列化的异常逻辑) +// 预期:V2 Encode逻辑正常(正常流程都能正确处理),返回的error类型为nil,[][]byte不为nil +func TestV2Encode_ShouldReturnNoError_GivenNormalDataOfPipelineGroupEvents(t *testing.T) { + // given + groupEventsSlice1 := genNormalPipelineGroupEventsSlice(100) + groupEventsSlice2 := genNormalPipelineGroupEventsSlice(100) + p := NewPromEncoder(19) + + // when + // then + data1, err1 := p.EncodeBatchV2(groupEventsSlice1) + assert.NoError(t, err1) + data2, err2 := p.EncodeBatchV2(groupEventsSlice2) + assert.NoError(t, err2) + + assert.Equal(t, len(data2), len(data1)) +} + +// 场景:V2 Encode接口功能测试(异常数据,非全nil或0值) +// 说明:尽管 EncodeBatchV2 内部会调用 EncodeV2,但异常情况可能是 EncodeBatchV2 侧的防御, +// 所以还需要测试异常情况下 EncodeV2 的功能 +// 因子:并非所有 Event type 均为 models.EventTypeMetric(e.g. 还可能是 models.EventTypeLogging 等) +// 因子:PipelineEvent interface 的实现,部分是「正常的*models.Metric」,部分为 nil,部分为new(models.Metric), +// 部分为其它(e.g. 
*models.Log 等) +// 预期:Encode逻辑正常(异常流程也能正确处理),返回的error类型不为nil,[][]byte为nil +func TestV2Encode_ShouldReturnError_GivenAbNormalDataOfPipelineGroupEvents(t *testing.T) { + // given + groupEventsSlice1 := genPipelineGroupEventsSliceIncludingAbnormalData(100) + groupEventsSlice2 := genPipelineGroupEventsSliceIncludingAbnormalData(100) + assert.Equal(t, len(groupEventsSlice1), len(groupEventsSlice2)) + p := NewPromEncoder(19) + + // when + // then + t.Run("Test EncodeV2 with abnormal data input", func(t *testing.T) { + for i, groupEvents := range groupEventsSlice1 { + data1, err1 := p.EncodeV2(groupEvents) + data2, err2 := p.EncodeV2(groupEventsSlice2[i]) + if err1 != nil { + assert.Error(t, err2) + assert.Equal(t, err1, err2) + } else { + assert.NoError(t, err2) + assert.Equal(t, len(data2), len(data1)) + } + } + }) + + t.Run("Test EncodeBatchV2 with abnormal data input", func(t *testing.T) { + data1, err1 := p.EncodeBatchV2(groupEventsSlice1) + assert.NoError(t, err1) + data2, err2 := p.EncodeBatchV2(groupEventsSlice2) + assert.NoError(t, err2) + + assert.Equal(t, len(data2), len(data1)) + }) +} + +// 场景:V2 Encode接口功能测试(异常数据,全nil或0值) +// 说明:尽管 EncodeBatchV2 内部会调用 EncodeV2,但异常情况可能是 EncodeBatchV2 侧的防御, +// 所以还需要测试异常情况下 EncodeV2 的功能 +// 因子:所有 *models.PipelineGroupEvents 及 []*models.PipelineGroupEvents 底层为 nil 或者 长度为0的切片 +// 预期:Encode逻辑正常(异常流程也能正确处理),返回的error类型不为nil,[][]byte为nil +func TestV2Encode_ShouldReturnError_GivenNilOrZeroDataOfPipelineGroupEvents(t *testing.T) { + // given + p := NewPromEncoder(19) + nilOrZeroGroupEventsSlices := []*models.PipelineGroupEvents{ + nil, + {}, // same as {Events: nil}, + {Events: make([]models.PipelineEvent, 0)}, + } + nilOrZeroGroupEventsSlicesEx := [][]*models.PipelineGroupEvents{ + nil, + {}, // same as {nil} + {{Events: nil}}, + nilOrZeroGroupEventsSlices, + } + + // when + // then + t.Run("Test EncodeV2 with nil or zero data input", func(t *testing.T) { + for _, input := range nilOrZeroGroupEventsSlices { + data, err := 
p.EncodeV2(input) + assert.Error(t, err) + assert.Nil(t, data) + } + }) + + t.Run("Test EncodeBatchV2 with nil or zero data input", func(t *testing.T) { + for _, input := range nilOrZeroGroupEventsSlicesEx { + data, err := p.EncodeBatchV2(input) + assert.Error(t, err) + assert.Nil(t, data) + } + }) +} + +// 场景:V2 Encode接口功能测试 +// 说明:EncoderBatchV2 内部会调用 EncoderV2,所以也同时测试了 EncoderV2 的功能 +// 因子:所有 Event type 均为 models.EventTypeMetric +// 因子:所有 PipelineEvent interface 的实现均为 *models.Metric +// 因子:每个 metric event 生成的 *models.Metric.Tags 中的 tag 仅一个 +// (确保 Encode 时 range map不用考虑 range go原生map每次顺序随机,从而导致2次Encode相同数据后得到的结果不同) +// PS:如果不这么做,就要对map做转化,先变成 range 保序的 slice,再 Encode; +// 但测试的功能点是Encode,所以采用上述方法绕过range go原生map每次顺序随机的特点,完成功能测试 +// 预期:Encode逻辑正常(正常流程都能正确处理),返回的error类型为nil,[][]byte不为nil,且两次Encode后返回的数据相同 +func TestEncoderBatchV2_ShouldReturnNoErrorAndEqualData_GivenNormalDataOfDataOfPipelineGroupEventsWithSingleTag(t *testing.T) { + // given + groupEventsSlice1 := genPipelineGroupEventsSliceSingleTag(100) + groupEventsSlice2 := genPipelineGroupEventsSliceSingleTag(100) + p := NewPromEncoder(19) + + // when + // then + data1, err1 := p.EncodeBatchV2(groupEventsSlice1) + assert.NoError(t, err1) + data2, err2 := p.EncodeBatchV2(groupEventsSlice2) + assert.NoError(t, err2) + + assert.Equal(t, data2, data1) +} + +// 场景:V2 Encode接口功能测试 +// 说明:EncoderBatchV2 内部会调用 EncoderV2,所以也同时测试了 EncoderV2 的功能 +// 因子:所有 Event type 均为 models.EventTypeMetric +// 因子:所有 PipelineEvent interface 的实现均为 *models.Metric +// 因子:每个 metric event 生成的 *models.Metric.Tags 中的 tag 仅一个 +// (确保 Encode 时 range map不用考虑 range go原生map每次顺序随机,从而导致2次Encode相同数据后得到的结果不同) +// PS:如果不这么做,就要对map做转化,先变成 range 保序的 slice,再 Encode; +// 但测试的功能点是Encode,所以采用上述方法绕过range go原生map每次顺序随机的特点,完成功能测试 +// 因子:对Encode后的数据进行Decode +// 因子:「构造的用例数据」的长度(len([]*models.PipelineGroupEvents))未超过 series limit +// 预期:「构造的用例数据」和「对用例数据先Encode再Decode后的数据」相等 +func 
TestEncoderBatchV2_ShouldDecodeSuccess_GivenNormalDataOfDataOfPipelineGroupEventsWithSingleTagNotExceedingSeriesLimit(t *testing.T) { + // given + seriesLimit := 19 + n := seriesLimit + wantGroupEventsSlice := genPipelineGroupEventsSliceSingleTag(n) + p := NewPromEncoder(seriesLimit) + data, err := p.EncodeBatchV2(wantGroupEventsSlice) + assert.NoError(t, err) + + // when + // then + gotGroupEventsSlice, err := DecodeBatchV2(data) + assert.NoError(t, err) + assert.Equal(t, wantGroupEventsSlice, gotGroupEventsSlice) +} + +// 场景:V2 Encode接口功能测试 +// 说明:EncoderBatchV2 内部会调用 EncoderV2,所以也同时测试了 EncoderV2 的功能 +// 因子:所有 Event type 均为 models.EventTypeMetric +// 因子:所有 PipelineEvent interface 的实现均为 *models.Metric +// 因子:每个 metric event 生成的 *models.Metric.Tags 中的 tag 仅一个 +// (确保 Encode 时 range map不用考虑 range go原生map每次顺序随机,从而导致2次Encode相同数据后得到的结果不同) +// PS:如果不这么做,就要对map做转化,先变成 range 保序的 slice,再 Encode; +// 但测试的功能点是Encode,所以采用上述方法绕过range go原生map每次顺序随机的特点,完成功能测试 +// 因子:对Encode后的数据进行Decode +// 因子:「构造的用例数据」的长度(len([]*models.PipelineGroupEvents))超过 series limit +// 预期:「构造的用例数据」的长度小于「对用例数据先Encode再Decode后的数据」的长度,且用 expectedLen 计算后的长度相等 +// PS:expectedLen 的计算方法,其实是和 genPipelineGroupEventsSlice 生成用例及根据series limit确定encode批次 +// 的逻辑相关,和 Encode 本身的逻辑无关 +func TestEncoderBatchV2_ShouldDecodeSuccess_GivenNormalDataOfDataOfPipelineGroupEventsWithSingleTagExceedingSeriesLimit(t *testing.T) { + // given + seriesLimit := 19 + n := 100 + wantGroupEventsSlice := genPipelineGroupEventsSliceSingleTag(n) + assert.Equal(t, n, len(wantGroupEventsSlice)) + p := NewPromEncoder(seriesLimit) + data, err := p.EncodeBatchV2(wantGroupEventsSlice) + assert.NoError(t, err) + expectedLen := func(limit, length int) int { + // make sure limit > 0 && length > 0 + if limit <= 0 || length <= 0 { + return -1 + } + + mod := length % limit + mul := length / limit + + res := 0 + for i := 0; i <= mul; i++ { + res += i * limit + } + res += (mul + 1) * mod + + return res + } + + // when + gotGroupEventsSlice, err := 
DecodeBatchV2(data) + assert.NoError(t, err) + + // then + assert.Equal(t, expectedLen(seriesLimit, n), len(gotGroupEventsSlice)) +} + +func genNormalPipelineGroupEventsSlice(n int) []*models.PipelineGroupEvents { + return genPipelineGroupEventsSlice(n, genPipelineEvent) +} + +func genPipelineGroupEventsSliceIncludingAbnormalData(n int) []*models.PipelineGroupEvents { + return genPipelineGroupEventsSlice(n, genPipelineEventIncludingAbnormalData) +} + +func genPipelineGroupEventsSliceSingleTag(n int) []*models.PipelineGroupEvents { + return genPipelineGroupEventsSlice(n, genPipelineEventSingleTag) +} + +func genPipelineGroupEventsSlice(n int, genPipelineEventFn func(int) []models.PipelineEvent) []*models.PipelineGroupEvents { + res := make([]*models.PipelineGroupEvents, 0, n) + for i := 1; i <= n; i++ { + res = append(res, &models.PipelineGroupEvents{ + Group: models.NewGroup(models.NewMetadata(), models.NewTags()), + Events: genPipelineEventFn(i), + }) + } + + return res +} + +func genPipelineEvent(n int) []models.PipelineEvent { + res := make([]models.PipelineEvent, 0, n) + for i := 1; i <= n; i++ { + res = append(res, genMetric(i)) + } + + return res +} + +func genMetric(n int) *models.Metric { + i := strconv.Itoa(n) + tags := models.NewKeyValues[string]() + tags.AddAll(map[string]string{ + // range map will out of order + "a" + i: "A" + i, + "b" + i: "B" + i, + "c" + i: "C" + i, + "d" + i: "D" + i, + }) + + return &models.Metric{ + Timestamp: 11111111 * uint64(n), + Tags: tags, + Value: &models.MetricSingleValue{Value: 1.1 * float64(n)}, + } +} + +func genPipelineEventIncludingAbnormalData(n int) []models.PipelineEvent { + res := make([]models.PipelineEvent, 0, n) + for i := 1; i <= n; i++ { + if i&1 == 0 { // i is even number + // normal data + res = append(res, genMetric(i)) + continue + } + + // i is odd number + // abnormal data + if i%3 == 0 { + // abnormal data: nil data + res = append(res, nil) + continue + } + + if i%5 == 0 { + // abnormal data: zero 
data + // PS: + // 1. 这里只是从边界情况考虑,构造了这种异常值 + // 但实际场景中,不会直接 new(models.Metric) 或者 &models.Metric{} 这样创建 zero data, + // 一般都是用 models.NewMetric|NewSingleValueMetric|NewMultiValuesMetric 等 构造函数(工厂模式)来创建, + // 上述构造函数位置:ilogtail/pkg/models/factory.go + // 2. 此外,也可以给 *models.Metric 的 GetTag 方法增加下 *models.Metric.Tag 为 nil 时的保护 + // (参考其 GetValue 方法的实现),文件位置:ilogtail/pkg/models/metric.go + res = append(res, new(models.Metric)) + continue + } + + // abnormal data: other event type not models.EventTypeMetric + res = append(res, new(models.Log)) + } + + return res +} + +func genPipelineEventSingleTag(n int) []models.PipelineEvent { + res := make([]models.PipelineEvent, 0, n) + for i := 1; i <= n; i++ { + res = append(res, genMetricSingleTag(i)) + } + + return res +} + +func genMetricSingleTag(n int) *models.Metric { + metricName := "test_metric" + i := strconv.Itoa(n) + tags := models.NewTagsWithMap(map[string]string{ + // only single tag + // keep range in order + "x" + i: "X" + i, + }) + + dataPoint := pb.Sample{Timestamp: 11111111 * int64(n), Value: 1.1 * float64(n)} + + return models.NewSingleValueMetric( + metricName, // value of key "__name__" in prometheus + models.MetricTypeGauge, + tags, + model.Time(dataPoint.Timestamp).Time().UnixNano(), + dataPoint.Value, + ) +} + +func DecodeBatchV2(data [][]byte) ([]*models.PipelineGroupEvents, error) { + if len(data) == 0 { + return nil, errors.New("no data to decode") + } + + var res []*models.PipelineGroupEvents + + meta, commonTags := models.NewMetadata(), models.NewTags() + for _, d := range data { + groupEvents, err := convertPromRequestToPipelineGroupEvents(d, meta, commonTags) + if err != nil { + continue + } + + res = append(res, groupEvents) + } + + return res, nil +} + +func convertPromRequestToPipelineGroupEvents(data []byte, metaInfo models.Metadata, commonTags models.Tags) (*models.PipelineGroupEvents, error) { + wr, err := unmarshalBatchTimeseriesData(data) + if err != nil { + return nil, err + } + + groupEvent 
:= &models.PipelineGroupEvents{ + Group: models.NewGroup(metaInfo, commonTags), + } + + for _, ts := range wr.Timeseries { + var metricName string + tags := models.NewTags() + for _, label := range ts.Labels { + if label.Name == metricNameKey { + metricName = label.Value + continue + } + tags.Add(label.Name, label.Value) + } + + for _, dataPoint := range ts.Samples { + metric := models.NewSingleValueMetric( + metricName, + models.MetricTypeGauge, + tags, + // Decode (during input_prometheus stage) makes timestamp + // with unix milliseconds into unix nanoseconds, + // e.g. "model.Time(milliseconds).Time().UnixNano()". + model.Time(dataPoint.Timestamp).Time().UnixNano(), + dataPoint.Value, + ) + groupEvent.Events = append(groupEvent.Events, metric) + } + } + + return groupEvent, nil +} + +func unmarshalBatchTimeseriesData(data []byte) (*pb.WriteRequest, error) { + wr := new(prompb.WriteRequest) + if err := wr.Unmarshal(data); err != nil { + return nil, err + } + + return convertPrompbToVictoriaMetricspb(wr) +} + +func convertPrompbToVictoriaMetricspb(wr *prompb.WriteRequest) (*pb.WriteRequest, error) { + if wr == nil || len(wr.Timeseries) == 0 { + return nil, errors.New("nil *prompb.WriteRequest") + } + + res := &pb.WriteRequest{ + Timeseries: make([]pb.TimeSeries, 0, len(wr.Timeseries)), + } + for _, tss := range wr.Timeseries { + res.Timeseries = append(res.Timeseries, pb.TimeSeries{ + Labels: convertToVMLabels(tss.Labels), + Samples: convertToVMSamples(tss.Samples), + }) + } + + return res, nil +} + +func convertToVMLabels(labels []prompb.Label) []pb.Label { + if len(labels) == 0 { + return nil + } + + res := make([]pb.Label, 0, len(labels)) + for _, label := range labels { + res = append(res, pb.Label{ + Name: string(label.Name), + Value: string(label.Value), + }) + } + + return res +} + +func convertToVMSamples(samples []prompb.Sample) []pb.Sample { + if len(samples) == 0 { + return nil + } + + res := make([]pb.Sample, 0, len(samples)) + for _, sample := range 
samples { + res = append(res, pb.Sample{ + Value: sample.Value, + Timestamp: sample.Timestamp, + }) + } + + return res +} diff --git a/pkg/protocol/encoder/prometheus/utils.go b/pkg/protocol/encoder/prometheus/utils.go new file mode 100644 index 0000000000..622b869e63 --- /dev/null +++ b/pkg/protocol/encoder/prometheus/utils.go @@ -0,0 +1,118 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "context" + "sort" + "sync" + + pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + + "github.com/alibaba/ilogtail/pkg/logger" + "github.com/alibaba/ilogtail/pkg/models" +) + +const metricNameKey = "__name__" + +func marshalBatchTimeseriesData(wr *pb.WriteRequest) []byte { + if len(wr.Timeseries) == 0 { + return nil + } + + data, err := wr.Marshal() + if err != nil { + // logger.Error(context.Background(), alarmType, "pb marshal err", err) + return nil + } + + return data +} + +func genPromRemoteWriteTimeseries(event *models.Metric) pb.TimeSeries { + return pb.TimeSeries{ + Labels: lexicographicalSort(append(convTagsToLabels(event.GetTags()), pb.Label{Name: metricNameKey, Value: event.GetName()})), + Samples: []pb.Sample{{ + Value: event.GetValue().GetSingleValue(), + + // Decode (during input_prometheus stage) makes timestamp + // with unix milliseconds into unix nanoseconds, + // e.g. "model.Time(milliseconds).Time().UnixNano()". 
+ // + // Encode (during flusher_prometheus stage) conversely makes timestamp + // with unix nanoseconds into unix milliseconds, + // e.g. "int64(nanoseconds)/10^6". + Timestamp: int64(event.GetTimestamp()) / 1e6, + }}, + } +} + +func convTagsToLabels(tags models.Tags) []pb.Label { + if tags == nil { + logger.Debugf(context.Background(), "get nil models.Tags") + return nil + } + + labels := make([]pb.Label, 0, tags.Len()) + for k, v := range tags.Iterator() { + // MUST NOT contain any empty label names or values. + // reference: https://prometheus.io/docs/specs/remote_write_spec/#labels + if k != "" && v != "" { + labels = append(labels, pb.Label{Name: k, Value: v}) + } + } + + return labels +} + +// MUST have label names sorted in lexicographical order. +// reference: https://prometheus.io/docs/specs/remote_write_spec/#labels +func lexicographicalSort(labels []pb.Label) []pb.Label { + sort.Sort(promLabels(labels)) + + return labels +} + +type promLabels []pb.Label + +func (p promLabels) Len() int { + return len(p) +} + +func (p promLabels) Less(i, j int) bool { + return p[i].Name < p[j].Name +} + +func (p promLabels) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +var wrPool sync.Pool + +func getWriteRequest(seriesLimit int) *pb.WriteRequest { + wr := wrPool.Get() + if wr == nil { + return &pb.WriteRequest{ + Timeseries: make([]pb.TimeSeries, 0, seriesLimit), + } + } + + return wr.(*pb.WriteRequest) +} + +func putWriteRequest(wr *pb.WriteRequest) { + wr.Timeseries = wr.Timeseries[:0] + wrPool.Put(wr) +} diff --git a/pkg/protocol/encoder/prometheus/utils_test.go b/pkg/protocol/encoder/prometheus/utils_test.go new file mode 100644 index 0000000000..c84e4ab68f --- /dev/null +++ b/pkg/protocol/encoder/prometheus/utils_test.go @@ -0,0 +1,96 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "sort" + "testing" + + pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/stretchr/testify/assert" +) + +// 场景:Prometheus label names 字典序排序 +// 因子:乱序的 Prometheus label names +// 预期:Prometheus label names 按字典序排序 +func TestLexicographicalSort_ShouldSortedInLexicographicalOrder(t *testing.T) { + // given + labels := []pb.Label{ + {Name: "Tutorial", Value: "tutorial"}, + {Name: "Point", Value: "point"}, + {Name: "Java", Value: "java"}, + {Name: "C++", Value: "c++"}, + {Name: "Golang", Value: "golang"}, + {Name: metricNameKey, Value: "test_metric_name"}, + } + ans := []pb.Label{ + {Name: "C++", Value: "c++"}, + {Name: "Golang", Value: "golang"}, + {Name: "Java", Value: "java"}, + {Name: "Point", Value: "point"}, + {Name: "Tutorial", Value: "tutorial"}, + {Name: metricNameKey, Value: "test_metric_name"}, + } + assert.Equal(t, len(ans), len(labels)) + + // when + got := lexicographicalSort(labels) + + // then + assert.Equal(t, ans, got) +} + +// 场景:性能测试,确定 lexicographicalSort 字典序排序方法的性能 +// 因子:利用 lexicographicalSort(底层基于sort.Sort())对 Prometheus label names 进行字典序排序 +// 预期:lexicographicalSort 和 sort.Strings 对 Prometheus label names 的字典序排序性能相当(数量级相同) +// goos: darwin +// goarch: arm64 +// pkg: github.com/alibaba/ilogtail/pkg/protocol/encoder/prometheus +// BenchmarkLexicographicalSort +// BenchmarkLexicographicalSort/lexicographicalSort +// BenchmarkLexicographicalSort/lexicographicalSort-12 23059904 47.51 ns/op +// BenchmarkLexicographicalSort/sort.Strings +// 
BenchmarkLexicographicalSort/sort.Strings-12 25321753 47.30 ns/op +// PASS +func BenchmarkLexicographicalSort(b *testing.B) { + prometheusLabels := []pb.Label{ + {Name: "Tutorial", Value: "tutorial"}, + {Name: "Point", Value: "point"}, + {Name: "Java", Value: "java"}, + {Name: "C++", Value: "c++"}, + {Name: "Golang", Value: "golang"}, + {Name: metricNameKey, Value: "test_metric_name"}, + } + stringLabels := []string{ + "Tutorial", + "Point", + "Java", + "C++", + "Golang", + metricNameKey, + } + + b.Run("lexicographicalSort", func(b *testing.B) { + for i := 0; i < b.N; i++ { + lexicographicalSort(prometheusLabels) + } + }) + + b.Run("sort.Strings", func(b *testing.B) { + for i := 0; i < b.N; i++ { + sort.Strings(stringLabels) + } + }) +} diff --git a/pluginmanager/logstore_config.go b/pluginmanager/logstore_config.go index 9693fd824c..70c38eac51 100644 --- a/pluginmanager/logstore_config.go +++ b/pluginmanager/logstore_config.go @@ -323,12 +323,15 @@ func hasDockerStdoutInput(plugins map[string]interface{}) bool { if !valid { continue } - typeName, valid := cfg["type"] + pluginTypeWithID, valid := cfg["type"] if !valid { continue } - if val, valid := typeName.(string); valid && val == input.ServiceDockerStdoutPluginName { - return true + if val, valid := pluginTypeWithID.(string); valid { + pluginType := getPluginType(val) + if pluginType == input.ServiceDockerStdoutPluginName { + return true + } } } return false @@ -377,11 +380,16 @@ func createLogstoreConfig(project string, logstore string, configName string, lo if !valid { continue } - typeName, valid := cfg["type"] + pluginTypeWithID, valid := cfg["type"] + if !valid { + continue + } + val, valid := pluginTypeWithID.(string) if !valid { continue } - if val, valid := typeName.(string); valid && (val == input.ServiceDockerStdoutPluginName || val == input.MetricDocierFilePluginName) { + pluginType := getPluginType(val) + if pluginType == input.ServiceDockerStdoutPluginName || pluginType == 
input.MetricDocierFilePluginName { configDetail, valid := cfg["detail"] if !valid { continue @@ -477,16 +485,19 @@ func createLogstoreConfig(project string, logstore string, configName string, lo if !ok { return nil, fmt.Errorf("invalid extension type") } - typeName, ok := extension["type"].(string) - if !ok { - return nil, fmt.Errorf("invalid extension type") - } - logger.Debug(contextImp.GetRuntimeContext(), "add extension", typeName) - err = loadExtension(logstoreC.genPluginMeta(typeName, false, false), logstoreC, extension["detail"]) - if err != nil { - return nil, err + if pluginTypeWithID, ok := extension["type"]; ok { + pluginTypeWithIDStr, ok := pluginTypeWithID.(string) + if !ok { + return nil, fmt.Errorf("invalid extension type") + } + pluginType := getPluginType(pluginTypeWithIDStr) + logger.Debug(contextImp.GetRuntimeContext(), "add extension", pluginType) + err = loadExtension(logstoreC.genPluginMeta(pluginTypeWithIDStr, false, false), logstoreC, extension["detail"]) + if err != nil { + return nil, err + } + contextImp.AddPlugin(pluginType) } - contextImp.AddPlugin(typeName) } } @@ -497,20 +508,21 @@ func createLogstoreConfig(project string, logstore string, configName string, lo for _, inputInterface := range inputs { input, ok := inputInterface.(map[string]interface{}) if ok { - if typeName, ok := input["type"]; ok { - if typeNameStr, ok := typeName.(string); ok { - if _, isMetricInput := pipeline.MetricInputs[typeNameStr]; isMetricInput { + if pluginTypeWithID, ok := input["type"]; ok { + if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { + pluginType := getPluginType(pluginTypeWithIDStr) + if _, isMetricInput := pipeline.MetricInputs[pluginType]; isMetricInput { // Load MetricInput plugin defined in pipeline.MetricInputs // pipeline.MetricInputs will be renamed in a future version - err = loadMetric(logstoreC.genPluginMeta(typeNameStr, true, false), logstoreC, input["detail"]) - } else if _, isServiceInput := 
pipeline.ServiceInputs[typeNameStr]; isServiceInput { + err = loadMetric(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), logstoreC, input["detail"]) + } else if _, isServiceInput := pipeline.ServiceInputs[pluginType]; isServiceInput { // Load ServiceInput plugin defined in pipeline.ServiceInputs - err = loadService(logstoreC.genPluginMeta(typeNameStr, true, false), logstoreC, input["detail"]) + err = loadService(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), logstoreC, input["detail"]) } if err != nil { return nil, err } - contextImp.AddPlugin(typeNameStr) + contextImp.AddPlugin(pluginType) continue } } @@ -528,14 +540,15 @@ func createLogstoreConfig(project string, logstore string, configName string, lo for i, processorInterface := range processors { processor, ok := processorInterface.(map[string]interface{}) if ok { - if typeName, ok := processor["type"]; ok { - if typeNameStr, ok := typeName.(string); ok { - logger.Debug(contextImp.GetRuntimeContext(), "add processor", typeNameStr) - err = loadProcessor(logstoreC.genPluginMeta(typeNameStr, true, false), i, logstoreC, processor["detail"]) + if pluginTypeWithID, ok := processor["type"]; ok { + if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { + pluginType := getPluginType(pluginTypeWithIDStr) + logger.Debug(contextImp.GetRuntimeContext(), "add processor", pluginType) + err = loadProcessor(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), i, logstoreC, processor["detail"]) if err != nil { return nil, err } - contextImp.AddPlugin(typeNameStr) + contextImp.AddPlugin(pluginType) continue } } @@ -554,14 +567,15 @@ func createLogstoreConfig(project string, logstore string, configName string, lo for _, aggregatorInterface := range aggregators { aggregator, ok := aggregatorInterface.(map[string]interface{}) if ok { - if typeName, ok := aggregator["type"]; ok { - if typeNameStr, ok := typeName.(string); ok { - logger.Debug(contextImp.GetRuntimeContext(), "add aggregator", 
typeNameStr) - err = loadAggregator(logstoreC.genPluginMeta(typeNameStr, true, false), logstoreC, aggregator["detail"]) + if pluginTypeWithID, ok := aggregator["type"]; ok { + if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { + pluginType := getPluginType(pluginTypeWithIDStr) + logger.Debug(contextImp.GetRuntimeContext(), "add aggregator", pluginType) + err = loadAggregator(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), logstoreC, aggregator["detail"]) if err != nil { return nil, err } - contextImp.AddPlugin(typeNameStr) + contextImp.AddPlugin(pluginType) continue } } @@ -584,18 +598,19 @@ func createLogstoreConfig(project string, logstore string, configName string, lo for num, flusherInterface := range flushers { flusher, ok := flusherInterface.(map[string]interface{}) if ok { - if typeName, ok := flusher["type"]; ok { - if typeNameStr, ok := typeName.(string); ok { - logger.Debug(contextImp.GetRuntimeContext(), "add flusher", typeNameStr) + if pluginTypeWithID, ok := flusher["type"]; ok { + if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { + pluginType := getPluginType(pluginTypeWithIDStr) + logger.Debug(contextImp.GetRuntimeContext(), "add flusher", pluginType) lastOne := false if num == flushersLen-1 { lastOne = true } - err = loadFlusher(logstoreC.genPluginMeta(typeNameStr, true, lastOne), logstoreC, flusher["detail"]) + err = loadFlusher(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, lastOne), logstoreC, flusher["detail"]) if err != nil { return nil, err } - contextImp.AddPlugin(typeNameStr) + contextImp.AddPlugin(pluginType) continue } } @@ -660,7 +675,7 @@ func LoadLogstoreConfig(project string, logstore string, configName string, logs func loadBuiltinConfig(name string, project string, logstore string, configName string, cfgStr string) (*LogstoreConfig, error) { logger.Infof(context.Background(), "load built-in config %v, config name: %v, logstore: %v", name, configName, logstore) - return 
createLogstoreConfig(project, logstore, configName, 0, cfgStr) + return createLogstoreConfig(project, logstore, configName, -1, cfgStr) } // loadMetric creates a metric plugin object and append to logstoreConfig.MetricPlugins. @@ -768,14 +783,21 @@ func applyPluginConfig(plugin interface{}, pluginConfig interface{}) error { return err } -// Rule: pluginName=pluginType/pluginID#pluginPriority. -func (lc *LogstoreConfig) genPluginMeta(pluginName string, genNodeID bool, lastOne bool) *pipeline.PluginMeta { +// Rule: pluginTypeWithID=pluginType/pluginID#pluginPriority. +func getPluginType(pluginTypeWithID string) string { + if ids := strings.IndexByte(pluginTypeWithID, '/'); ids != -1 { + return pluginTypeWithID[:ids] + } + return pluginTypeWithID +} + +func (lc *LogstoreConfig) genPluginMeta(pluginTypeWithID string, genNodeID bool, lastOne bool) *pipeline.PluginMeta { nodeID := "" childNodeID := "" - if isPluginTypeWithID(pluginName) { - pluginTypeWithID := pluginName - if idx := strings.IndexByte(pluginName, '#'); idx != -1 { - pluginTypeWithID = pluginName[:idx] + if isPluginTypeWithID(pluginTypeWithID) { + pluginTypeWithID := pluginTypeWithID + if idx := strings.IndexByte(pluginTypeWithID, '#'); idx != -1 { + pluginTypeWithID = pluginTypeWithID[:idx] } if ids := strings.IndexByte(pluginTypeWithID, '/'); ids != -1 { if genNodeID { @@ -793,30 +815,31 @@ func (lc *LogstoreConfig) genPluginMeta(pluginName string, genNodeID bool, lastO } } } + pluginType := pluginTypeWithID pluginID := lc.genPluginID() if genNodeID { nodeID, childNodeID = lc.genNodeID(lastOne) } - pluginTypeWithID := fmt.Sprintf("%s/%s", pluginName, pluginID) + pluginTypeWithID = fmt.Sprintf("%s/%s", pluginType, pluginID) return &pipeline.PluginMeta{ PluginTypeWithID: pluginTypeWithID, - PluginType: pluginName, + PluginType: pluginType, PluginID: pluginID, NodeID: nodeID, ChildNodeID: childNodeID, } } -func isPluginTypeWithID(pluginName string) bool { - if idx := strings.IndexByte(pluginName, '/'); idx 
!= -1 { +func isPluginTypeWithID(pluginTypeWithID string) bool { + if idx := strings.IndexByte(pluginTypeWithID, '/'); idx != -1 { return true } return false } -func GetPluginPriority(pluginName string) int { - if idx := strings.IndexByte(pluginName, '#'); idx != -1 { - val, err := strconv.Atoi(pluginName[idx+1:]) +func GetPluginPriority(pluginTypeWithID string) int { + if idx := strings.IndexByte(pluginTypeWithID, '#'); idx != -1 { + val, err := strconv.Atoi(pluginTypeWithID[idx+1:]) if err != nil { return 0 } diff --git a/pluginmanager/plugin_manager.go b/pluginmanager/plugin_manager.go index 77e4d2f4f2..f9dcb253da 100644 --- a/pluginmanager/plugin_manager.go +++ b/pluginmanager/plugin_manager.go @@ -98,11 +98,11 @@ var containerConfigJSON = `{ ] }` -func panicRecover(pluginName string) { +func panicRecover(pluginType string) { if err := recover(); err != nil { trace := make([]byte, 2048) runtime.Stack(trace, true) - logger.Error(context.Background(), "PLUGIN_RUNTIME_ALARM", "plugin", pluginName, "panicked", err, "stack", string(trace)) + logger.Error(context.Background(), "PLUGIN_RUNTIME_ALARM", "plugin", pluginType, "panicked", err, "stack", string(trace)) } } diff --git a/pluginmanager/plugin_runner_helper.go b/pluginmanager/plugin_runner_helper.go index 04e55d709d..be90760737 100644 --- a/pluginmanager/plugin_runner_helper.go +++ b/pluginmanager/plugin_runner_helper.go @@ -123,8 +123,8 @@ func GetConfigFlushers(runner PluginRunner) []pipeline.Flusher { return flushers } -func pluginUnImplementError(category pluginCategory, version ConfigVersion, pluginName string) error { - return fmt.Errorf("plugin does not implement %s%s. pluginType: %s", category, strings.ToUpper(string(version)), pluginName) +func pluginUnImplementError(category pluginCategory, version ConfigVersion, pluginType string) error { + return fmt.Errorf("plugin does not implement %s%s. 
pluginType: %s", category, strings.ToUpper(string(version)), pluginType) } func pluginCategoryUndefinedError(category pluginCategory) error { diff --git a/plugins.yml b/plugins.yml index 167c7e1482..f09d74d16d 100644 --- a/plugins.yml +++ b/plugins.yml @@ -25,6 +25,7 @@ plugins: - import: "github.com/alibaba/ilogtail/plugins/aggregator/skywalking" - import: "github.com/alibaba/ilogtail/plugins/extension/basicauth" - import: "github.com/alibaba/ilogtail/plugins/extension/default_decoder" + - import: "github.com/alibaba/ilogtail/plugins/extension/default_encoder" - import: "github.com/alibaba/ilogtail/plugins/extension/group_info_filter" - import: "github.com/alibaba/ilogtail/plugins/extension/request_breaker" - import: "github.com/alibaba/ilogtail/plugins/flusher/checker" diff --git a/plugins/aggregator/shardhash/aggregator_shardhash.go b/plugins/aggregator/shardhash/aggregator_shardhash.go index 3894fb0d82..c0a5d450d8 100644 --- a/plugins/aggregator/shardhash/aggregator_shardhash.go +++ b/plugins/aggregator/shardhash/aggregator_shardhash.go @@ -34,7 +34,7 @@ import ( const ( defaultShardCount = 8 maxShardCount = 512 - pluginName = "aggregator_shardhash" + pluginType = "aggregator_shardhash" ) // shardAggregator decides which agg (log group) the log belongs to. 
@@ -95,7 +95,7 @@ func (s *AggregatorShardHash) Init(context pipeline.Context, que pipeline.LogGro s.queue = que if len(s.SourceKeys) == 0 { - return 0, fmt.Errorf("plugin %v must specify SourceKeys", pluginName) + return 0, fmt.Errorf("plugin %v must specify SourceKeys", pluginType) } if s.ShardCount <= 0 || s.ShardCount > maxShardCount { return 0, fmt.Errorf("invalid ShardCount: %v, range [1, %v]", @@ -237,7 +237,7 @@ func newAggregatorShardHash() *AggregatorShardHash { } func init() { - pipeline.Aggregators[pluginName] = func() pipeline.Aggregator { + pipeline.Aggregators[pluginType] = func() pipeline.Aggregator { return newAggregatorShardHash() } } diff --git a/plugins/extension/default_encoder/default_encoder.go b/plugins/extension/default_encoder/default_encoder.go new file mode 100644 index 0000000000..646d4b2f16 --- /dev/null +++ b/plugins/extension/default_encoder/default_encoder.go @@ -0,0 +1,83 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package defaultencoder + +import ( + "encoding/json" + "errors" + + "github.com/alibaba/ilogtail/pkg/logger" + "github.com/alibaba/ilogtail/pkg/pipeline" + "github.com/alibaba/ilogtail/pkg/pipeline/extensions" + "github.com/alibaba/ilogtail/pkg/protocol/encoder" +) + +// ensure ExtensionDefaultEncoder implements the extensions.EncoderExtension interface +var _ extensions.EncoderExtension = (*ExtensionDefaultEncoder)(nil) + +type ExtensionDefaultEncoder struct { + extensions.Encoder + + Format string + options map[string]any // additional properties map to here +} + +func NewExtensionDefaultEncoder() *ExtensionDefaultEncoder { + return &ExtensionDefaultEncoder{} +} + +func (e *ExtensionDefaultEncoder) UnmarshalJSON(bytes []byte) error { + err := json.Unmarshal(bytes, &e.options) + if err != nil { + return err + } + + format, ok := e.options["Format"].(string) + if !ok { + return errors.New("field Format should be type of string") + } + + delete(e.options, "Format") + e.Format = format + + return nil +} + +func (e *ExtensionDefaultEncoder) Description() string { + return "default encoder that support builtin formats" +} + +func (e *ExtensionDefaultEncoder) Init(context pipeline.Context) error { + enc, err := encoder.NewEncoder(e.Format, e.options) + if err != nil { + return err + } + + e.Encoder = enc + + logger.Infof(context.GetRuntimeContext(), "%s init success, encoder: %s", e.Description(), e.Format) + + return nil +} + +func (e *ExtensionDefaultEncoder) Stop() error { + return nil +} + +func init() { + pipeline.AddExtensionCreator("ext_default_encoder", func() pipeline.Extension { + return NewExtensionDefaultEncoder() + }) +} diff --git a/plugins/extension/default_encoder/default_encoder_test.go b/plugins/extension/default_encoder/default_encoder_test.go new file mode 100644 index 0000000000..0e7a2d1dc6 --- /dev/null +++ b/plugins/extension/default_encoder/default_encoder_test.go @@ -0,0 +1,141 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaultencoder + +import ( + "encoding/json" + "fmt" + "testing" + + . "github.com/smartystreets/goconvey/convey" + + "github.com/alibaba/ilogtail/pkg/protocol/encoder/prometheus" + "github.com/alibaba/ilogtail/plugins/test/mock" +) + +// 场景:插件初始化 +// 因子:Format 字段存在 +// 因子:Prometheus Protocol +// 预期:初始化成功,且 Encoder 为 Prometheus Encoder +func TestEncoder_ShouldPassConfigToRealEncoder_GivenCorrectConfigInput(t *testing.T) { + Convey("Given correct config json string", t, func() { + e := NewExtensionDefaultEncoder() + So(e, ShouldNotBeNil) + So(e.Encoder, ShouldBeNil) + So(e.Format, ShouldBeEmpty) + So(e.options, ShouldBeNil) + + encodeProtocol := "prometheus" + optField, optValue := "SeriesLimit", 1024 + configJSONStr := fmt.Sprintf(`{"Format":"%s","%s":%d}`, encodeProtocol, optField, optValue) + // must using float64(optValue), not optValue + // https://github.com/smartystreets/goconvey/issues/437 + wantOpts := map[string]any{optField: float64(optValue)} + + Convey("Then should json unmarshal success", func() { + err := json.Unmarshal([]byte(configJSONStr), e) + So(err, ShouldBeNil) + So(e.Encoder, ShouldBeNil) + So(e.options, ShouldResemble, wantOpts) + So(e.Format, ShouldEqual, encodeProtocol) + + Convey("Then should init success", func() { + err = e.Init(mock.NewEmptyContext("p", "l", "c")) + So(err, ShouldBeNil) + So(e.Encoder, ShouldNotBeNil) + + Convey("Then encoder implement should be *prometheus.Encoder", func() { + 
promEncoder, ok := e.Encoder.(*prometheus.Encoder) + So(ok, ShouldBeTrue) + So(promEncoder, ShouldNotBeNil) + So(promEncoder.SeriesLimit, ShouldEqual, optValue) + }) + }) + + Convey("Then should stop success", func() { + err := e.Stop() + So(err, ShouldBeNil) + }) + }) + }) +} + +// 场景:插件初始化 +// 因子:Format 字段存在 +// 因子:Unsupported Protocol +// 预期:初始化失败 +func TestEncoder_ShouldNotPassConfigToRealEncoder_GivenIncorrectConfigInput(t *testing.T) { + Convey("Given incorrect config json string but with Format field", t, func() { + e := NewExtensionDefaultEncoder() + So(e, ShouldNotBeNil) + So(e.Encoder, ShouldBeNil) + So(e.Format, ShouldBeEmpty) + So(e.options, ShouldBeNil) + + encodeProtocol := "unknown" + configJSONStr := fmt.Sprintf(`{"Format":"%s"}`, encodeProtocol) + + Convey("Then should json unmarshal success", func() { + err := json.Unmarshal([]byte(configJSONStr), e) + So(err, ShouldBeNil) + So(e.Encoder, ShouldBeNil) + So(e.Format, ShouldEqual, encodeProtocol) + + Convey("Then should init failed", func() { + err = e.Init(mock.NewEmptyContext("p", "l", "c")) + So(err, ShouldNotBeNil) + So(e.Encoder, ShouldBeNil) + }) + + Convey("Then should stop success", func() { + err := e.Stop() + So(err, ShouldBeNil) + }) + }) + }) +} + +// 场景:插件初始化 +// 因子:Format 字段缺失 +// 预期:json unmarshal 失败,初始化失败 +func TestEncoder_ShouldUnmarshalFailed_GivenConfigWithoutFormat(t *testing.T) { + Convey("Given incorrect config json string and without Format field", t, func() { + e := NewExtensionDefaultEncoder() + So(e, ShouldNotBeNil) + So(e.Encoder, ShouldBeNil) + So(e.Format, ShouldBeEmpty) + So(e.options, ShouldBeNil) + + configJSONStr := `{"Unknown":"unknown"}` + + Convey("Then should json unmarshal failed", func() { + err := json.Unmarshal([]byte(configJSONStr), e) + So(err, ShouldNotBeNil) + So(e.Encoder, ShouldBeNil) + So(e.Format, ShouldBeEmpty) + + Convey("Then should init failed", func() { + err = e.Init(mock.NewEmptyContext("p", "l", "c")) + So(err, ShouldNotBeNil) + So(e.Encoder, 
ShouldBeNil) + }) + + Convey("Then should stop success", func() { + err := e.Stop() + So(err, ShouldBeNil) + }) + }) + }) +} diff --git a/plugins/flusher/http/flusher_http.go b/plugins/flusher/http/flusher_http.go index aa324558df..d0a09442e5 100644 --- a/plugins/flusher/http/flusher_http.go +++ b/plugins/flusher/http/flusher_http.go @@ -16,6 +16,7 @@ package http import ( "bytes" + "compress/gzip" "crypto/rand" "errors" "fmt" @@ -26,6 +27,8 @@ import ( "sync" "time" + "github.com/golang/snappy" + "github.com/alibaba/ilogtail/pkg/fmtstr" "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" @@ -39,8 +42,9 @@ import ( const ( defaultTimeout = time.Minute - contentTypeHeader = "Content-Type" - defaultContentType = "application/octet-stream" + contentTypeHeader = "Content-Type" + defaultContentType = "application/octet-stream" + contentEncodingHeader = "Content-Encoding" ) var contentTypeMaps = map[string]string{ @@ -50,6 +54,11 @@ var contentTypeMaps = map[string]string{ converter.EncodingCustom: defaultContentType, } +var supportedCompressionType = map[string]any{ + "gzip": nil, + "snappy": nil, +} + type retryConfig struct { Enable bool // If enable retry, default is true MaxRetryTimes int // Max retry times, default is 3 @@ -57,31 +66,62 @@ type retryConfig struct { MaxDelay time.Duration // max delay time when retry, default is 30s } +type Client interface { + Do(req *http.Request) (*http.Response, error) +} + type FlusherHTTP struct { - RemoteURL string // RemoteURL to request - Headers map[string]string // Headers to append to the http request - Query map[string]string // Query parameters to append to the http request - Timeout time.Duration // Request timeout, default is 60s - Retry retryConfig // Retry strategy, default is retry 3 times with delay time begin from 1second, max to 30 seconds - Convert helper.ConvertConfig // Convert defines which protocol and format to convert to - Concurrency int // How many requests can be 
performed in concurrent - Authenticator *extensions.ExtensionConfig // name and options of the extensions.ClientAuthenticator extension to use - FlushInterceptor *extensions.ExtensionConfig // name and options of the extensions.FlushInterceptor extension to use - AsyncIntercept bool // intercept the event asynchronously - RequestInterceptors []extensions.ExtensionConfig // custom request interceptor settings - QueueCapacity int // capacity of channel + RemoteURL string // RemoteURL to request + Headers map[string]string // Headers to append to the http request + Query map[string]string // Query parameters to append to the http request + Timeout time.Duration // Request timeout, default is 60s + Retry retryConfig // Retry strategy, default is retry 3 times with delay time begin from 1second, max to 30 seconds + Encoder *extensions.ExtensionConfig // Encoder defines which protocol and format to encode to + Convert helper.ConvertConfig // Convert defines which protocol and format to convert to + Concurrency int // How many requests can be performed in concurrent + MaxConnsPerHost int // MaxConnsPerHost for http.Transport + MaxIdleConnsPerHost int // MaxIdleConnsPerHost for http.Transport + IdleConnTimeout time.Duration // IdleConnTimeout for http.Transport + WriteBufferSize int // WriteBufferSize for http.Transport + Authenticator *extensions.ExtensionConfig // name and options of the extensions.ClientAuthenticator extension to use + FlushInterceptor *extensions.ExtensionConfig // name and options of the extensions.FlushInterceptor extension to use + AsyncIntercept bool // intercept the event asynchronously + RequestInterceptors []extensions.ExtensionConfig // custom request interceptor settings + QueueCapacity int // capacity of channel + DropEventWhenQueueFull bool // If true, pipeline events will be dropped when the queue is full + Compression string // Compression type, support gzip and snappy at this moment. 
varKeys []string context pipeline.Context + encoder extensions.Encoder converter *converter.Converter - client *http.Client + client Client interceptor extensions.FlushInterceptor queue chan interface{} counter sync.WaitGroup } +func NewHTTPFlusher() *FlusherHTTP { + return &FlusherHTTP{ + QueueCapacity: 1024, + Timeout: defaultTimeout, + Concurrency: 1, + Convert: helper.ConvertConfig{ + Protocol: converter.ProtocolCustomSingle, + Encoding: converter.EncodingJSON, + IgnoreUnExpectedData: true, + }, + Retry: retryConfig{ + Enable: true, + MaxRetryTimes: 3, + InitialDelay: time.Second, + MaxDelay: 30 * time.Second, + }, + } +} + func (f *FlusherHTTP) Description() string { return "http flusher for ilogtail" } @@ -101,12 +141,16 @@ func (f *FlusherHTTP) Init(context pipeline.Context) error { return err } - converter, err := f.getConverter() - if err != nil { + var err error + if err = f.initEncoder(); err != nil { + logger.Error(f.context.GetRuntimeContext(), "FLUSHER_INIT_ALARM", "http flusher init encoder fail, error", err) + return err + } + + if err = f.initConverter(); err != nil { logger.Error(f.context.GetRuntimeContext(), "FLUSHER_INIT_ALARM", "http flusher init converter fail, error", err) return err } - f.converter = converter if f.FlushInterceptor != nil { var ext pipeline.Extension @@ -140,7 +184,9 @@ func (f *FlusherHTTP) Init(context pipeline.Context) error { f.buildVarKeys() f.fillRequestContentType() - logger.Info(f.context.GetRuntimeContext(), "http flusher init", "initialized") + logger.Info(f.context.GetRuntimeContext(), "http flusher init", "initialized", + "timeout", f.Timeout, + "compression", f.Compression) return nil } @@ -179,6 +225,53 @@ func (f *FlusherHTTP) Stop() error { return nil } +func (f *FlusherHTTP) SetHTTPClient(client Client) { + f.client = client +} + +func (f *FlusherHTTP) initEncoder() error { + if f.Encoder == nil { + return nil + } + + ext, err := f.context.GetExtension(f.Encoder.Type, f.Encoder.Options) + if err != nil { + 
return fmt.Errorf("get extension failed, error: %w", err) + } + + enc, ok := ext.(extensions.Encoder) + if !ok { + return fmt.Errorf("filter(%s) not implement interface extensions.Encoder", f.Encoder) + } + + f.encoder = enc + + return nil +} + +func (f *FlusherHTTP) initConverter() error { + conv, err := f.getConverter() + if err == nil { + f.converter = conv + return nil + } + + if f.encoder == nil { + // e.g. + // Prometheus http flusher does not config helper.ConvertConfig, + // but must config encoder config (i.e. prometheus encoder config). + // If err != nil, meanwhile http flusher has no encoder, + // flusher cannot work, so should return error. + return err + } + + return nil +} + +func (f *FlusherHTTP) getConverter() (*converter.Converter, error) { + return converter.NewConverterWithSep(f.Convert.Protocol, f.Convert.Encoding, f.Convert.Separator, f.Convert.IgnoreUnExpectedData, f.Convert.TagFieldsRename, f.Convert.ProtocolFieldsRename, f.context.GetPipelineScopeConfig()) +} + func (f *FlusherHTTP) initHTTPClient() error { transport := http.DefaultTransport if dt, ok := transport.(*http.Transport); ok { @@ -186,6 +279,18 @@ func (f *FlusherHTTP) initHTTPClient() error { if f.Concurrency > dt.MaxIdleConnsPerHost { dt.MaxIdleConnsPerHost = f.Concurrency + 1 } + if f.MaxConnsPerHost > dt.MaxConnsPerHost { + dt.MaxConnsPerHost = f.MaxConnsPerHost + } + if f.MaxIdleConnsPerHost > dt.MaxIdleConnsPerHost { + dt.MaxIdleConnsPerHost = f.MaxIdleConnsPerHost + } + if f.IdleConnTimeout > dt.IdleConnTimeout { + dt.IdleConnTimeout = f.IdleConnTimeout + } + if f.WriteBufferSize > 0 { + dt.WriteBufferSize = f.WriteBufferSize + } transport = dt } @@ -245,13 +350,19 @@ func (f *FlusherHTTP) initRequestInterceptors(transport http.RoundTripper) (http return transport, nil } -func (f *FlusherHTTP) getConverter() (*converter.Converter, error) { - return converter.NewConverterWithSep(f.Convert.Protocol, f.Convert.Encoding, f.Convert.Separator, f.Convert.IgnoreUnExpectedData, 
f.Convert.TagFieldsRename, f.Convert.ProtocolFieldsRename, f.context.GetPipelineScopeConfig()) -} - func (f *FlusherHTTP) addTask(log interface{}) { f.counter.Add(1) - f.queue <- log + + if f.DropEventWhenQueueFull { + select { + case f.queue <- log: + default: + f.counter.Done() + logger.Warningf(f.context.GetRuntimeContext(), "FLUSHER_FLUSH_ALARM", "http flusher dropped a group event since the queue is full") + } + } else { + f.queue <- log + } } func (f *FlusherHTTP) countDownTask() { @@ -259,14 +370,49 @@ func (f *FlusherHTTP) countDownTask() { } func (f *FlusherHTTP) runFlushTask() { + flushTaskFn, action := f.convertAndFlush, "convert" + if f.encoder != nil { + flushTaskFn, action = f.encodeAndFlush, "encode" + } + for data := range f.queue { - err := f.convertAndFlush(data) + err := flushTaskFn(data) if err != nil { - logger.Error(f.context.GetRuntimeContext(), "FLUSHER_FLUSH_ALARM", "http flusher failed convert or flush data, data dropped, error", err) + logger.Errorf(f.context.GetRuntimeContext(), "FLUSHER_FLUSH_ALARM", + "http flusher failed %s or flush data, data dropped, error: %s", action, err.Error()) } } } +func (f *FlusherHTTP) encodeAndFlush(event any) error { + defer f.countDownTask() + + var data [][]byte + var err error + + switch v := event.(type) { + case *models.PipelineGroupEvents: + data, err = f.encoder.EncodeV2(v) + + default: + return errors.New("unsupported event type") + } + + if err != nil { + return fmt.Errorf("http flusher encode event data fail, error: %w", err) + } + + for _, shard := range data { + if err = f.flushWithRetry(shard, nil); err != nil { + logger.Error(f.context.GetRuntimeContext(), "FLUSHER_FLUSH_ALARM", + "http flusher failed flush data after retry, data dropped, error", err, + "remote url", f.RemoteURL) + } + } + + return nil +} + func (f *FlusherHTTP) convertAndFlush(data interface{}) error { defer f.countDownTask() var logs interface{} @@ -345,8 +491,37 @@ func (f *FlusherHTTP) getNextRetryDelay(retryTime int) 
time.Duration { return time.Duration(harf + jitter.Int64()) } +func (f *FlusherHTTP) compressData(data []byte) (io.Reader, error) { + var reader io.Reader = bytes.NewReader(data) + if compressionType, ok := f.Headers[contentEncodingHeader]; ok { + switch compressionType { + case "gzip": + var buf bytes.Buffer + gw := gzip.NewWriter(&buf) + if _, err := gw.Write(data); err != nil { + return nil, err + } + if err := gw.Close(); err != nil { + return nil, err + } + reader = &buf + case "snappy": + compressedData := snappy.Encode(nil, data) + reader = bytes.NewReader(compressedData) + default: + } + } + return reader, nil +} + func (f *FlusherHTTP) flush(data []byte, varValues map[string]string) (ok, retryable bool, err error) { - req, err := http.NewRequest(http.MethodPost, f.RemoteURL, bytes.NewReader(data)) + reader, err := f.compressData(data) + if err != nil { + logger.Error(f.context.GetRuntimeContext(), "FLUSHER_FLUSH_ALARM", "create reader error", err) + return false, false, err + } + + req, err := http.NewRequest(http.MethodPost, f.RemoteURL, reader) if err != nil { logger.Error(f.context.GetRuntimeContext(), "FLUSHER_FLUSH_ALARM", "http flusher create request fail, error", err) return false, false, err @@ -450,6 +625,12 @@ func (f *FlusherHTTP) fillRequestContentType() { f.Headers = make(map[string]string, 4) } + if f.Compression != "" { + if _, ok := supportedCompressionType[f.Compression]; ok { + f.Headers[contentEncodingHeader] = f.Compression + } + } + _, ok := f.Headers[contentTypeHeader] if ok { return @@ -464,21 +645,6 @@ func (f *FlusherHTTP) fillRequestContentType() { func init() { pipeline.Flushers["flusher_http"] = func() pipeline.Flusher { - return &FlusherHTTP{ - QueueCapacity: 1024, - Timeout: defaultTimeout, - Concurrency: 1, - Convert: helper.ConvertConfig{ - Protocol: converter.ProtocolCustomSingle, - Encoding: converter.EncodingJSON, - IgnoreUnExpectedData: true, - }, - Retry: retryConfig{ - Enable: true, - MaxRetryTimes: 3, - InitialDelay: 
time.Second, - MaxDelay: 30 * time.Second, - }, - } + return NewHTTPFlusher() } } diff --git a/plugins/flusher/http/flusher_http_test.go b/plugins/flusher/http/flusher_http_test.go index 64140b8df8..5848ef3f17 100644 --- a/plugins/flusher/http/flusher_http_test.go +++ b/plugins/flusher/http/flusher_http_test.go @@ -15,6 +15,7 @@ package http import ( + "compress/gzip" "context" "fmt" "io" @@ -35,6 +36,8 @@ import ( "github.com/alibaba/ilogtail/pkg/pipeline/extensions" "github.com/alibaba/ilogtail/pkg/protocol" converter "github.com/alibaba/ilogtail/pkg/protocol/converter" + "github.com/alibaba/ilogtail/pkg/protocol/encoder/prometheus" + defaultencoder "github.com/alibaba/ilogtail/plugins/extension/default_encoder" "github.com/alibaba/ilogtail/plugins/test/mock" ) @@ -60,6 +63,34 @@ func TestHttpFlusherInit(t *testing.T) { }) }) + Convey("Given a http flusher with prometheus encoder", t, func() { + flusher := &FlusherHTTP{ + RemoteURL: "http://localhost:9090/write", + Encoder: &extensions.ExtensionConfig{ + Type: "ext_default_encoder", + Options: map[string]any{"Format": "prometheus", "SeriesLimit": 1024}, + }, + Concurrency: 1, + } + Convey("Then Init() should implement prometheus encoder success", func() { + err := flusher.Init(mock.NewEmptyContext("p", "l", "c")) + So(err, ShouldBeNil) + + ext, err := flusher.context.GetExtension(flusher.Encoder.Type, flusher.Encoder.Options) + So(err, ShouldBeNil) + + enc, ok := ext.(extensions.Encoder) + So(ok, ShouldBeTrue) + + defEnc, ok := enc.(*defaultencoder.ExtensionDefaultEncoder) + So(ok, ShouldBeTrue) + + promEnc, ok := defEnc.Encoder.(*prometheus.Encoder) + So(ok, ShouldBeTrue) + So(promEnc.SeriesLimit, ShouldEqual, 1024) + }) + }) + Convey("Given a http flusher with Query contains variable ", t, func() { flusher := &FlusherHTTP{ RemoteURL: "http://localhost:8086/write", @@ -735,6 +766,258 @@ func TestHttpFlusherFlushWithInterceptor(t *testing.T) { }) } +func TestHttpFlusherDropEvents(t *testing.T) { + Convey("Given a 
http flusher that drops events when queue is full", t, func() { + mockIntercepter := &mockInterceptor{} + flusher := &FlusherHTTP{ + RemoteURL: "http://test.com/write", + Convert: helper.ConvertConfig{ + Protocol: converter.ProtocolInfluxdb, + Encoding: converter.EncodingCustom, + }, + interceptor: mockIntercepter, + context: mock.NewEmptyContext("p", "l", "c"), + AsyncIntercept: true, + Timeout: defaultTimeout, + Concurrency: 1, + queue: make(chan interface{}, 1), + DropEventWhenQueueFull: true, + } + + Convey("should discard events when queue is full", func() { + groupEvents := models.PipelineGroupEvents{ + Events: []models.PipelineEvent{&models.Metric{ + Name: "cpu.load.short", + Timestamp: 1672321328000000000, + Tags: models.NewTagsWithKeyValues("host", "server01", "region", "cn"), + Value: &models.MetricSingleValue{Value: 0.64}, + }}, + } + err := flusher.Export([]*models.PipelineGroupEvents{&groupEvents}, nil) + So(err, ShouldBeNil) + err = flusher.Export([]*models.PipelineGroupEvents{&groupEvents}, nil) + So(err, ShouldBeNil) + So(len(flusher.queue), ShouldEqual, 1) + err = flusher.convertAndFlush(<-flusher.queue) + So(err, ShouldBeNil) + }) + }) +} + +func TestFlusherHTTP_GzipCompression(t *testing.T) { + Convey("Given a http flusher with protocol: Influxdb, encoding: custom, query: contains variable '%{tag.db}'", t, func() { + + var actualRequests []string + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("POST", "http://test.com/write?db=mydb", func(req *http.Request) (*http.Response, error) { + assert.Equal(t, "mydb", req.Header.Get("db")) + body := req.Body + + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + body, _ = gzip.NewReader(req.Body) + defer body.Close() + } + + bodyBytes, _ := io.ReadAll(body) + actualRequests = append(actualRequests, string(bodyBytes)) + return httpmock.NewStringResponse(200, "ok"), nil + }) + + flusher := &FlusherHTTP{ + RemoteURL: 
"http://test.com/write", + Convert: helper.ConvertConfig{ + Protocol: converter.ProtocolInfluxdb, + Encoding: converter.EncodingCustom, + }, + Timeout: defaultTimeout, + Concurrency: 1, + Query: map[string]string{ + "db": "%{tag.db}", + }, + Headers: map[string]string{ + "db": "%{tag.db}", + }, + Compression: "gzip", + } + + err := flusher.Init(&mockContext{}) + So(err, ShouldBeNil) + + Convey("When Flush with logGroupList contains 2 valid Log in influxdb format metrics, each with LogTag: '__tag__:db'", func() { + logGroups := []*protocol.LogGroup{ + { + Logs: []*protocol.Log{ + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000000"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "30"}, + }, + }, + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000001"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "32"}, + }, + }, + }, + LogTags: []*protocol.LogTag{{Key: "__tag__:db", Value: "mydb"}}, + }, + { + Logs: []*protocol.Log{ + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000003"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "30"}, + }, + }, + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000004"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "32"}, + }, + }, + }, + LogTags: []*protocol.LogTag{{Key: "__tag__:db", Value: "mydb"}}, + }, + } + + err := flusher.Flush("", "", "", logGroups) + flusher.Stop() + Convey("Then", func() { + Convey("Flush() should not return error", func() { + So(err, ShouldBeNil) + }) + + Convey("each logGroup should be sent 
as one single request", func() { + reqCount := httpmock.GetTotalCallCount() + So(reqCount, ShouldEqual, 2) + }) + + Convey("each http request body should be valid as expect", func() { + So(actualRequests, ShouldResemble, []string{ + "weather,location=hangzhou,province=zhejiang value=30 1668653452000000000\n" + + "weather,location=hangzhou,province=zhejiang value=32 1668653452000000001\n", + + "weather,location=hangzhou,province=zhejiang value=30 1668653452000000003\n" + + "weather,location=hangzhou,province=zhejiang value=32 1668653452000000004\n", + }) + }) + }) + }) + }) + + Convey("Given a http flusher with protocol: custom_single, encoding: json, query: contains variable '%{tag.db}'", t, func() { + + var actualRequests []string + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("POST", "http://test.com/write?db=mydb", func(req *http.Request) (*http.Response, error) { + body, _ := io.ReadAll(req.Body) + actualRequests = append(actualRequests, string(body)) + return httpmock.NewStringResponse(200, "ok"), nil + }) + + flusher := &FlusherHTTP{ + RemoteURL: "http://test.com/write", + Convert: helper.ConvertConfig{ + Protocol: converter.ProtocolCustomSingle, + Encoding: converter.EncodingJSON, + }, + Timeout: defaultTimeout, + Concurrency: 1, + Query: map[string]string{ + "db": "%{tag.db}", + }, + } + + err := flusher.Init(&mockContext{}) + So(err, ShouldBeNil) + + Convey("When Flush with logGroupList contains 2 valid Log in influxdb format metrics, each LOG with Content: '__tag__:db'", func() { + logGroups := []*protocol.LogGroup{ + { + Logs: []*protocol.Log{ + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000000"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "30"}, + {Key: "__tag__:db", Value: "mydb"}, + }, + }, + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: 
"1668653452000000001"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "32"}, + {Key: "__tag__:db", Value: "mydb"}, + }, + }, + }, + }, + { + Logs: []*protocol.Log{ + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000003"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "30"}, + {Key: "__tag__:db", Value: "mydb"}, + }, + }, + { + Contents: []*protocol.Log_Content{ + {Key: "__time_nano__", Value: "1668653452000000004"}, + {Key: "__name__", Value: "weather"}, + {Key: "__labels__", Value: "location#$#hangzhou|province#$#zhejiang"}, + {Key: "__value__", Value: "32"}, + {Key: "__tag__:db", Value: "mydb"}, + }, + }, + }, + }, + } + + err := flusher.Flush("", "", "", logGroups) + flusher.Stop() + Convey("Then", func() { + Convey("Flush() should not return error", func() { + So(err, ShouldBeNil) + }) + + Convey("each log in logGroups should be sent as one single request", func() { + reqCount := httpmock.GetTotalCallCount() + So(reqCount, ShouldEqual, 4) + }) + + Convey("each http request body should be valid as expect", func() { + So(actualRequests, ShouldResemble, []string{ + `{"contents":{"__labels__":"location#$#hangzhou|province#$#zhejiang","__name__":"weather","__time_nano__":"1668653452000000000","__value__":"30"},"tags":{"db":"mydb","host.ip":""},"time":0}`, + `{"contents":{"__labels__":"location#$#hangzhou|province#$#zhejiang","__name__":"weather","__time_nano__":"1668653452000000001","__value__":"32"},"tags":{"db":"mydb","host.ip":""},"time":0}`, + `{"contents":{"__labels__":"location#$#hangzhou|province#$#zhejiang","__name__":"weather","__time_nano__":"1668653452000000003","__value__":"30"},"tags":{"db":"mydb","host.ip":""},"time":0}`, + 
`{"contents":{"__labels__":"location#$#hangzhou|province#$#zhejiang","__name__":"weather","__time_nano__":"1668653452000000004","__value__":"32"},"tags":{"db":"mydb","host.ip":""},"time":0}`, + }) + }) + }) + }) + }) +} + type mockContext struct { pipeline.Context basicAuth *basicAuth diff --git a/plugins/input/command/command_const.go b/plugins/input/command/command_const.go index d6d7dfc00f..fd66f281f0 100644 --- a/plugins/input/command/command_const.go +++ b/plugins/input/command/command_const.go @@ -15,7 +15,7 @@ package command const ( - pluginName = "input_command" + pluginType = "input_command" defaultContentType = "PlainText" defaultIntervalMs = 5000 // The default is Alibaba Cloud's collection frequency of 5s defaltExecScriptTimeOut = 3000 // Default 3 seconds timeout diff --git a/plugins/input/command/input_command.go b/plugins/input/command/input_command.go index 093976ba31..adf7e3e82e 100644 --- a/plugins/input/command/input_command.go +++ b/plugins/input/command/input_command.go @@ -224,7 +224,7 @@ func (in *InputCommand) Description() string { } func init() { - pipeline.MetricInputs[pluginName] = func() pipeline.MetricInput { + pipeline.MetricInputs[pluginType] = func() pipeline.MetricInput { return &InputCommand{ ContentEncoding: defaultContentType, IntervalMs: defaultIntervalMs, diff --git a/plugins/input/command/input_command_test.go b/plugins/input/command/input_command_test.go index cfdbc62e59..0ce4282182 100644 --- a/plugins/input/command/input_command_test.go +++ b/plugins/input/command/input_command_test.go @@ -38,7 +38,7 @@ func TestCommandTestCollecetUserBase64WithTimeout(t *testing.T) { fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) c := new(test.MockMetricCollector) // Script set to sleep for 5 seconds scriptContent := `sleep 5 && echo -e "__labels__:a#\$#1|b#\$#2 
__value__:0 __name__:metric_command_example \n __labels__:a#\$#3|b#\$#4 __value__:3 __name__:metric_command_example2"` @@ -70,7 +70,7 @@ func TestCommandTestCollecetUserBase64(t *testing.T) { } fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) c := new(test.MockMetricCollector) scriptContent := `echo -e "__labels__:a#\$#1|b#\$#2 __value__:0 __name__:metric_command_example \n __labels__:a#\$#3|b#\$#4 __value__:3 __name__:metric_command_example2"` // base64 @@ -100,7 +100,7 @@ func TestCommandTestCollect(t *testing.T) { fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) c := new(test.MockMetricCollector) p.ScriptContent = `echo "test"` @@ -128,7 +128,7 @@ func TestCommandTestExceptionCollect(t *testing.T) { } fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) c := new(test.MockMetricCollector) p.ScriptContent = `echo "1"` @@ -157,7 +157,7 @@ func TestCommandTestTimeoutCollect(t *testing.T) { } fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) c := new(test.MockMetricCollector) p.ScriptContent = `sleep 10` @@ -191,7 +191,7 @@ func TestCommandTestInit(t *testing.T) { fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) _, err = p.Init(ctx) require.Error(t, 
err) if err != nil { @@ -295,7 +295,7 @@ func TestErrorCmdPath(t *testing.T) { fmt.Printf("Username %s\n", u.Username) ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputCommand) + p := pipeline.MetricInputs[pluginType]().(*InputCommand) p.ScriptContent = `echo "test"` p.ScriptType = "shell" p.ContentEncoding = "PlainText" diff --git a/plugins/input/hostmeta/input_host_meta.go b/plugins/input/hostmeta/input_host_meta.go index 962e14c0ab..6a22d09360 100644 --- a/plugins/input/hostmeta/input_host_meta.go +++ b/plugins/input/hostmeta/input_host_meta.go @@ -29,7 +29,7 @@ import ( "github.com/alibaba/ilogtail/pkg/util" ) -const pluginName = "metric_meta_host" +const pluginType = "metric_meta_host" const ( Process = "PROCESS" @@ -191,7 +191,7 @@ func formatCmd(cmd string) string { } func init() { - pipeline.MetricInputs[pluginName] = func() pipeline.MetricInput { + pipeline.MetricInputs[pluginType] = func() pipeline.MetricInput { return &InputNodeMeta{ CPU: true, Memory: true, diff --git a/plugins/input/hostmeta/input_host_meta_benchmark_test.go b/plugins/input/hostmeta/input_host_meta_benchmark_test.go index 8b28c359a6..97f2ad5005 100644 --- a/plugins/input/hostmeta/input_host_meta_benchmark_test.go +++ b/plugins/input/hostmeta/input_host_meta_benchmark_test.go @@ -29,7 +29,7 @@ import ( // Benchmark_CollectNoProcess-64 314 3775773 ns/op func Benchmark_CollectNoProcess(b *testing.B) { cxt := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputNodeMeta) + p := pipeline.MetricInputs[pluginType]().(*InputNodeMeta) p.CPU = true p.Memory = true p.Net = true @@ -56,7 +56,7 @@ func Benchmark_CollectNoProcess(b *testing.B) { // Benchmark_CollectWithProcess-64 249 4781412 ns/op func Benchmark_CollectWithProcess(b *testing.B) { cxt := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputNodeMeta) + p := 
pipeline.MetricInputs[pluginType]().(*InputNodeMeta) p.CPU = true p.Memory = true p.Net = true diff --git a/plugins/input/hostmeta/input_host_meta_test.go b/plugins/input/hostmeta/input_host_meta_test.go index 3c627e4d5a..20c2fc1720 100644 --- a/plugins/input/hostmeta/input_host_meta_test.go +++ b/plugins/input/hostmeta/input_host_meta_test.go @@ -68,7 +68,7 @@ func TestInputNodeMeta_Collect(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputNodeMeta) + p := pipeline.MetricInputs[pluginType]().(*InputNodeMeta) c := new(test.MockMetricCollector) p.Disk = tt.args.Disk p.CPU = tt.args.CPU diff --git a/plugins/input/input_wineventlog/wineventlog.go b/plugins/input/input_wineventlog/wineventlog.go index 07325eb8a6..8f985fe857 100644 --- a/plugins/input/input_wineventlog/wineventlog.go +++ b/plugins/input/input_wineventlog/wineventlog.go @@ -30,7 +30,7 @@ import ( ) const ( - pluginName = "service_wineventlog" + pluginType = "service_wineventlog" ) // WinEventLog represents the plugin to collect Windows event logs. 
@@ -160,7 +160,7 @@ func (w *WinEventLog) run() bool { return true } defer func() { - logger.Infof(w.context.GetRuntimeContext(), "%s Stopping %v", w.logPrefix, pluginName) + logger.Infof(w.context.GetRuntimeContext(), "%s Stopping %v", w.logPrefix, pluginType) err := w.eventLogger.Close() if err != nil { logger.Warningf(w.context.GetRuntimeContext(), "WINEVENTLOG_MAIN_ALARM", "%s Close() error", w.logPrefix, err) @@ -209,7 +209,7 @@ func (w *WinEventLog) run() bool { } func (w *WinEventLog) initCheckpoint() { - checkpointKey := pluginName + "_" + w.Name + checkpointKey := pluginType + "_" + w.Name ok := w.context.GetCheckPointObject(checkpointKey, &w.checkpoint) if ok { logger.Infof(w.context.GetRuntimeContext(), "%s Checkpoint loaded: %v", w.logPrefix, w.checkpoint) @@ -218,7 +218,7 @@ func (w *WinEventLog) initCheckpoint() { } func (w *WinEventLog) saveCheckpoint() { - checkpointKey := pluginName + "_" + w.Name + checkpointKey := pluginType + "_" + w.Name w.context.SaveCheckPointObject(checkpointKey, &w.checkpoint) } @@ -231,7 +231,7 @@ func newWinEventLog() *WinEventLog { } func init() { - pipeline.ServiceInputs[pluginName] = func() pipeline.ServiceInput { + pipeline.ServiceInputs[pluginType] = func() pipeline.ServiceInput { return newWinEventLog() } } diff --git a/plugins/input/kafka/input_kafka.go b/plugins/input/kafka/input_kafka.go index 02052f62d9..5d5cf37cc0 100644 --- a/plugins/input/kafka/input_kafka.go +++ b/plugins/input/kafka/input_kafka.go @@ -66,23 +66,23 @@ type InputKafka struct { } const ( - pluginName = "service_kafka" + pluginType = "service_kafka" ) func (k *InputKafka) Init(context pipeline.Context) (int, error) { k.context = context if len(k.Brokers) == 0 { - return 0, fmt.Errorf("must specify Brokers for plugin %v", pluginName) + return 0, fmt.Errorf("must specify Brokers for plugin %v", pluginType) } if len(k.Topics) == 0 { - return 0, fmt.Errorf("must specify Topics for plugin %v", pluginName) + return 0, fmt.Errorf("must specify Topics 
for plugin %v", pluginType) } if k.ConsumerGroup == "" { - return 0, fmt.Errorf("must specify ConsumerGroup for plugin %v", pluginName) + return 0, fmt.Errorf("must specify ConsumerGroup for plugin %v", pluginType) } if k.ClientID == "" { - return 0, fmt.Errorf("must specify ClientID for plugin %v", pluginName) + return 0, fmt.Errorf("must specify ClientID for plugin %v", pluginType) } // init decoder @@ -272,7 +272,7 @@ func (k *InputKafka) Collect(collector pipeline.Collector) error { } func init() { - pipeline.ServiceInputs[pluginName] = func() pipeline.ServiceInput { + pipeline.ServiceInputs[pluginType] = func() pipeline.ServiceInput { return &InputKafka{ ConsumerGroup: "", ClientID: "", diff --git a/plugins/input/kubernetesmetav1/crd_open_kruise.go b/plugins/input/kubernetesmetav1/crd_open_kruise.go index d3d82fe56c..4c31f18808 100644 --- a/plugins/input/kubernetesmetav1/crd_open_kruise.go +++ b/plugins/input/kubernetesmetav1/crd_open_kruise.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetesmeta +package kubernetesmetav1 import ( "github.com/alibaba/ilogtail/pkg/helper" diff --git a/plugins/input/kubernetesmetav1/input_kubernetes_meta.go b/plugins/input/kubernetesmetav1/input_kubernetes_meta.go index fe967d8d20..f837c81881 100644 --- a/plugins/input/kubernetesmetav1/input_kubernetes_meta.go +++ b/plugins/input/kubernetesmetav1/input_kubernetes_meta.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package kubernetesmeta +package kubernetesmetav1 import ( "fmt" @@ -32,7 +32,7 @@ import ( "github.com/alibaba/ilogtail/pkg/pipeline" ) -const pluginName = "metric_meta_kubernetes" +const pluginType = "metric_meta_kubernetes" const ( defaultIntervalMs = 300000 ) @@ -246,7 +246,7 @@ func (in *InputKubernetesMeta) Stop() error { } func init() { - pipeline.MetricInputs[pluginName] = func() pipeline.MetricInput { + pipeline.MetricInputs[pluginType] = func() pipeline.MetricInput { return &InputKubernetesMeta{ Pod: true, Service: true, diff --git a/plugins/input/kubernetesmetav1/input_kubernetes_meta_test.go b/plugins/input/kubernetesmetav1/input_kubernetes_meta_test.go index 4e58a8a1fb..cc14a145b3 100644 --- a/plugins/input/kubernetesmetav1/input_kubernetes_meta_test.go +++ b/plugins/input/kubernetesmetav1/input_kubernetes_meta_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetesmeta +package kubernetesmetav1 import ( "github.com/alibaba/ilogtail/pkg/pipeline" @@ -36,7 +36,7 @@ import ( func BenchmarkInputKubernetesMeta_Collect(b *testing.B) { cxt := mock.NewEmptyContext("project", "store", "config") - p := pipeline.MetricInputs[pluginName]().(*InputKubernetesMeta) + p := pipeline.MetricInputs[pluginType]().(*InputKubernetesMeta) p.KubeConfigPath = "default" if _, err := p.Init(cxt); err != nil { b.Errorf("cannot init the mock process plugin: %v", err) diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect.go b/plugins/input/kubernetesmetav1/kubernetes_collect.go index 0b08936a8b..4b3d42edfb 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package kubernetesmeta +package kubernetesmetav1 import ( "strings" diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect_apps.go b/plugins/input/kubernetesmetav1/kubernetes_collect_apps.go index d48aa8bc67..b57a738123 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect_apps.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect_apps.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetesmeta +package kubernetesmetav1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect_batch.go b/plugins/input/kubernetesmetav1/kubernetes_collect_batch.go index 86a305d560..2df6f44648 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect_batch.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect_batch.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetesmeta +package kubernetesmetav1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect_core.go b/plugins/input/kubernetesmetav1/kubernetes_collect_core.go index cb1481b6f9..00f77d5ea3 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect_core.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect_core.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package kubernetesmeta +package kubernetesmetav1 import ( "fmt" diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect_core_test.go b/plugins/input/kubernetesmetav1/kubernetes_collect_core_test.go index 37344369ee..15bbad73b2 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect_core_test.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect_core_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetesmeta +package kubernetesmetav1 import "testing" diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect_networking.go b/plugins/input/kubernetesmetav1/kubernetes_collect_networking.go index 86b25cd6cd..55bbc55fb4 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect_networking.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect_networking.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetesmeta +package kubernetesmetav1 import ( "strconv" diff --git a/plugins/input/kubernetesmetav1/kubernetes_collect_storage.go b/plugins/input/kubernetesmetav1/kubernetes_collect_storage.go index e43827664a..6e5b15878e 100644 --- a/plugins/input/kubernetesmetav1/kubernetes_collect_storage.go +++ b/plugins/input/kubernetesmetav1/kubernetes_collect_storage.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package kubernetesmeta +package kubernetesmetav1 import ( "k8s.io/apimachinery/pkg/labels" diff --git a/plugins/input/snmp/input_snmp.go b/plugins/input/snmp/input_snmp.go index 68df8e1c4d..0b50270abd 100644 --- a/plugins/input/snmp/input_snmp.go +++ b/plugins/input/snmp/input_snmp.go @@ -35,7 +35,7 @@ import ( g "github.com/gosnmp/gosnmp" ) -const pluginName = "service_snmp" +const pluginType = "service_snmp" // SNMP is a service input plugin to collect logs following SNMP protocol. // It works with SNMP agents configured by users. It uses TCP or UDP @@ -586,7 +586,7 @@ func (s *Agent) Collect(_ pipeline.Collector) error { } func init() { - pipeline.ServiceInputs[pluginName] = func() pipeline.ServiceInput { + pipeline.ServiceInputs[pluginType] = func() pipeline.ServiceInput { return &Agent{ Port: "161", Transport: "udp", diff --git a/plugins/processor/addfields/processor_add_fields.go b/plugins/processor/addfields/processor_add_fields.go index dda6c4afae..aa9faa9d94 100644 --- a/plugins/processor/addfields/processor_add_fields.go +++ b/plugins/processor/addfields/processor_add_fields.go @@ -29,13 +29,13 @@ type ProcessorAddFields struct { context pipeline.Context } -const pluginName = "processor_add_fields" +const pluginType = "processor_add_fields" // Init method would be triggered before working for init some system resources, // like socket, mutex. In this plugin, it verifies Fields must not be empty. func (p *ProcessorAddFields) Init(context pipeline.Context) error { if len(p.Fields) == 0 { - return fmt.Errorf("must specify Fields for plugin %v", pluginName) + return fmt.Errorf("must specify Fields for plugin %v", pluginType) } p.context = context return nil @@ -94,7 +94,7 @@ func (p *ProcessorAddFields) isExist(log *protocol.Log, key string) bool { // Register the plugin to the Processors array. 
func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorAddFields{ Fields: nil, IgnoreIfExist: false, diff --git a/plugins/processor/appender/processor_appender.go b/plugins/processor/appender/processor_appender.go index a6f956e6e8..67ac3ae348 100644 --- a/plugins/processor/appender/processor_appender.go +++ b/plugins/processor/appender/processor_appender.go @@ -41,14 +41,14 @@ type ProcessorAppender struct { context pipeline.Context } -const pluginName = "processor_appender" +const pluginType = "processor_appender" var replaceReg = regexp.MustCompile(`{{[^}]+}}`) // Init called for init some system resources, like socket, mutex... func (p *ProcessorAppender) Init(context pipeline.Context) error { if len(p.Key) == 0 || len(p.Value) == 0 { - return fmt.Errorf("must specify Key and Value for plugin %v", pluginName) + return fmt.Errorf("must specify Key and Value for plugin %v", pluginType) } p.context = context manager := platformmeta.GetManager(p.Platform) @@ -191,7 +191,7 @@ func (p *ProcessorAppender) ParseVariableValue(key string) string { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorAppender{ Platform: platformmeta.Mock, } diff --git a/plugins/processor/desensitize/processor_desensitize.go b/plugins/processor/desensitize/processor_desensitize.go index 8038ba65c2..5907e4381b 100644 --- a/plugins/processor/desensitize/processor_desensitize.go +++ b/plugins/processor/desensitize/processor_desensitize.go @@ -39,7 +39,7 @@ type ProcessorDesensitize struct { regexContent *regexp2.Regexp } -const pluginName = "processor_desensitize" +const pluginType = "processor_desensitize" // Init called for init some system resources, like socket, mutex... 
func (p *ProcessorDesensitize) Init(context pipeline.Context) error { @@ -157,7 +157,7 @@ func (p *ProcessorDesensitize) desensitize(val string) string { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorDesensitize{ SourceKey: "", Method: "const", diff --git a/plugins/processor/dictmap/processor_dict_map.go b/plugins/processor/dictmap/processor_dict_map.go index c374afe558..a9d7facc9c 100644 --- a/plugins/processor/dictmap/processor_dict_map.go +++ b/plugins/processor/dictmap/processor_dict_map.go @@ -25,7 +25,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" ) -const pluginName = "processor_dict_map" +const pluginType = "processor_dict_map" type ProcessorDictMap struct { DictFilePath string @@ -45,7 +45,7 @@ func (p *ProcessorDictMap) Init(context pipeline.Context) error { p.context = context if p.SourceKey == "" { - return fmt.Errorf("must specify SourceKey for plugin %v", pluginName) + return fmt.Errorf("must specify SourceKey for plugin %v", pluginType) } if p.DestKey == "" || p.DestKey == p.SourceKey { @@ -56,11 +56,11 @@ func (p *ProcessorDictMap) Init(context pipeline.Context) error { } if p.DictFilePath == "" && len(p.MapDict) == 0 { - return fmt.Errorf("at least give one source map data for plugin %v", pluginName) + return fmt.Errorf("at least give one source map data for plugin %v", pluginType) } if len(p.MapDict) > p.MaxDictSize { - return fmt.Errorf("map size exceed maximum length %v for plugin %v ", p.MaxDictSize, pluginName) + return fmt.Errorf("map size exceed maximum length %v for plugin %v ", p.MaxDictSize, pluginType) } if p.Mode != "overwrite" && p.Mode != "fill" { @@ -128,7 +128,7 @@ func (p *ProcessorDictMap) readCsvFile() error { return fmt.Errorf("hash crash, check whether the map rule redefined of value: %+v", value) } - logger.Debugf(p.context.GetRuntimeContext(), "Plugin %v adds mapping rule %v : %v", pluginName, row[0], 
row[1]) + logger.Debugf(p.context.GetRuntimeContext(), "Plugin %v adds mapping rule %v : %v", pluginType, row[0], row[1]) p.MapDict[row[0]] = row[1] } return nil @@ -187,7 +187,7 @@ func (p *ProcessorDictMap) processLog(log *protocol.Log) { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorDictMap{ HandleMissing: false, Missing: "Unknown", diff --git a/plugins/processor/drop/processor_drop.go b/plugins/processor/drop/processor_drop.go index f682978905..2f284f35cb 100644 --- a/plugins/processor/drop/processor_drop.go +++ b/plugins/processor/drop/processor_drop.go @@ -28,12 +28,12 @@ type ProcessorDrop struct { context pipeline.Context } -const pluginName = "processor_drop" +const pluginType = "processor_drop" // Init called for init some system resources, like socket, mutex... func (p *ProcessorDrop) Init(context pipeline.Context) error { if len(p.DropKeys) == 0 { - return fmt.Errorf("must specify DropKeys for plugin %v", pluginName) + return fmt.Errorf("must specify DropKeys for plugin %v", pluginType) } p.context = context @@ -64,7 +64,7 @@ func (p *ProcessorDrop) processLog(log *protocol.Log) { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorDrop{} } } diff --git a/plugins/processor/encrypt/processor_encrypt.go b/plugins/processor/encrypt/processor_encrypt.go index e170e8086b..0155b9f713 100644 --- a/plugins/processor/encrypt/processor_encrypt.go +++ b/plugins/processor/encrypt/processor_encrypt.go @@ -34,7 +34,7 @@ import ( ) const ( - pluginName = "processor_encrypt" + pluginType = "processor_encrypt" defaultAlarmType = "PROCESSOR_ENCRYPT_ALARM" encryptErrorText = "ENCRYPT_ERROR" ) @@ -74,7 +74,7 @@ type ProcessorEncrypt struct { func (p *ProcessorEncrypt) Init(context pipeline.Context) error { p.context = context if len(p.SourceKeys) == 0 { 
- return fmt.Errorf("plugin %v must specify SourceKey", pluginName) + return fmt.Errorf("plugin %v must specify SourceKey", pluginType) } p.keyDict = make(map[string]bool) for _, key := range p.SourceKeys { @@ -94,7 +94,7 @@ func (p *ProcessorEncrypt) Init(context pipeline.Context) error { } func (*ProcessorEncrypt) Description() string { - return fmt.Sprintf("processor %v is used to encrypt data with AES CBC", pluginName) + return fmt.Sprintf("processor %v is used to encrypt data with AES CBC", pluginType) } func (p *ProcessorEncrypt) ProcessLogs(logArray []*protocol.Log) []*protocol.Log { @@ -162,7 +162,7 @@ func (p *ProcessorEncrypt) paddingWithPKCS7(data string) []byte { func (p *ProcessorEncrypt) parseKey() error { if len(p.EncryptionParameters.Key) == 0 && len(p.EncryptionParameters.KeyFilePath) == 0 { - return fmt.Errorf("plugin %v must specify Key or KeyFilePath", pluginName) + return fmt.Errorf("plugin %v must specify Key or KeyFilePath", pluginType) } var err error if len(p.EncryptionParameters.KeyFilePath) > 0 { @@ -173,7 +173,7 @@ func (p *ProcessorEncrypt) parseKey() error { } if err != nil { return fmt.Errorf("plugin %v loads key file %v error: %v", - pluginName, p.EncryptionParameters.KeyFilePath, err) + pluginType, p.EncryptionParameters.KeyFilePath, err) } logger.Infof(p.context.GetRuntimeContext(), "read key from file %v, hash: %v", p.EncryptionParameters.KeyFilePath, @@ -182,10 +182,10 @@ func (p *ProcessorEncrypt) parseKey() error { // Decode from hex to bytes. 
if p.key, err = hex.DecodeString(p.EncryptionParameters.Key); err != nil { - return fmt.Errorf("plugin %v decodes key from hex error: %v, try hex", pluginName, err) + return fmt.Errorf("plugin %v decodes key from hex error: %v, try hex", pluginType, err) } if p.cipher, err = aes.NewCipher(p.key); err != nil { - return fmt.Errorf("plugin %s create cipher with key error: %v", pluginName, err) + return fmt.Errorf("plugin %s create cipher with key error: %v", pluginType, err) } p.blockSize = p.cipher.BlockSize() return nil @@ -193,7 +193,7 @@ func (p *ProcessorEncrypt) parseKey() error { func (p *ProcessorEncrypt) parseIV() error { if len(p.EncryptionParameters.IV) == 0 { - return fmt.Errorf("plugin %s must specify IV", pluginName) + return fmt.Errorf("plugin %s must specify IV", pluginType) // [DISABLED] Random IV // logger.Infof("IV is not specified, use random IV and prepend it to ciphertext") // return nil @@ -201,11 +201,11 @@ func (p *ProcessorEncrypt) parseIV() error { var err error if p.iv, err = hex.DecodeString(p.EncryptionParameters.IV); err != nil { - return fmt.Errorf("plugin %v decodes IV %v error: %v", pluginName, p.EncryptionParameters.IV, err) + return fmt.Errorf("plugin %v decodes IV %v error: %v", pluginType, p.EncryptionParameters.IV, err) } if len(p.iv) != p.blockSize { return fmt.Errorf("plugin %v finds size mismatch between IV(%v) and BlockSize(%v), must be same", - pluginName, len(p.iv), p.blockSize) + pluginType, len(p.iv), p.blockSize) } return nil } @@ -218,7 +218,7 @@ func newProcessorEncrypt() *ProcessorEncrypt { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return newProcessorEncrypt() } } diff --git a/plugins/processor/filter/keyregex/processor_filter_key_regex.go b/plugins/processor/filter/keyregex/processor_filter_key_regex.go index 8416029664..93222e834e 100644 --- a/plugins/processor/filter/keyregex/processor_filter_key_regex.go +++ 
b/plugins/processor/filter/keyregex/processor_filter_key_regex.go @@ -24,7 +24,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" ) -const pluginName = "processor_filter_key_regex" +const pluginType = "processor_filter_key_regex" type ProcessorKeyFilter struct { Include []string @@ -64,8 +64,8 @@ func (p *ProcessorKeyFilter) Init(context pipeline.Context) error { } metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", pluginName)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginName)) + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", pluginType)) + p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) return nil } @@ -118,7 +118,7 @@ func (p *ProcessorKeyFilter) ProcessLogs(logArray []*protocol.Log) []*protocol.L } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorKeyFilter{} } } diff --git a/plugins/processor/filter/regex/processor_filter_regex.go b/plugins/processor/filter/regex/processor_filter_regex.go index 8ed1778424..dbd04022bf 100644 --- a/plugins/processor/filter/regex/processor_filter_regex.go +++ b/plugins/processor/filter/regex/processor_filter_regex.go @@ -24,7 +24,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" ) -const pluginName = "processor_filter_regex" +const pluginType = "processor_filter_regex" // ProcessorRegexFilter is a processor plugin to filter log according to the value of field. 
// Include/Exclude are maps from string to string, key is used to search field in log, value @@ -68,8 +68,8 @@ func (p *ProcessorRegexFilter) Init(context pipeline.Context) error { } } metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", pluginName)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginName)) + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", pluginType)) + p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) return nil } @@ -132,7 +132,7 @@ func (p *ProcessorRegexFilter) ProcessLogs(logArray []*protocol.Log) []*protocol } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorRegexFilter{} } } diff --git a/plugins/processor/gotime/processor_gotime.go b/plugins/processor/gotime/processor_gotime.go index c945c6336a..e96d1deb07 100644 --- a/plugins/processor/gotime/processor_gotime.go +++ b/plugins/processor/gotime/processor_gotime.go @@ -51,23 +51,23 @@ type ProcessorGotime struct { } const ( - pluginName = "processor_gotime" + pluginType = "processor_gotime" machineTimeZone = -100 ) // Init called for init some system resources, like socket, mutex... 
func (p *ProcessorGotime) Init(context pipeline.Context) error { if p.SourceKey == "" { - return fmt.Errorf("must specify SourceKey for plugin %v", pluginName) + return fmt.Errorf("must specify SourceKey for plugin %v", pluginType) } if p.SourceFormat == "" { - return fmt.Errorf("must specify SourceFormat for plugin %v", pluginName) + return fmt.Errorf("must specify SourceFormat for plugin %v", pluginType) } if p.DestKey == "" { - return fmt.Errorf("must specify DestKey for plugin %v", pluginName) + return fmt.Errorf("must specify DestKey for plugin %v", pluginType) } if p.DestFormat == "" { - return fmt.Errorf("must specify DestFormat for plugin %v", pluginName) + return fmt.Errorf("must specify DestFormat for plugin %v", pluginType) } p.sourceLocation = time.Local if p.SourceLocation != machineTimeZone { @@ -166,7 +166,7 @@ func (p *ProcessorGotime) processLog(log *protocol.Log) { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorGotime{ SourceKey: "", SourceFormat: "", diff --git a/plugins/processor/grok/processor_grok.go b/plugins/processor/grok/processor_grok.go index 92aed6befc..178e11d6b0 100644 --- a/plugins/processor/grok/processor_grok.go +++ b/plugins/processor/grok/processor_grok.go @@ -51,7 +51,7 @@ type ProcessorGrok struct { aliases map[string]string // Correspondence between alias and original name, e.g. {"pid":"POSINT", "program":"PROG"} } -const pluginName = "processor_grok" +const pluginType = "processor_grok" // Init called for init some system resources, like socket, mutex... 
func (p *ProcessorGrok) Init(context pipeline.Context) error { @@ -336,7 +336,7 @@ func (p *ProcessorGrok) compileMatchs() error { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorGrok{ CustomPatternDir: []string{}, CustomPatterns: map[string]string{}, diff --git a/plugins/processor/json/processor_json.go b/plugins/processor/json/processor_json.go index 59dbfe4454..4b450f1a65 100644 --- a/plugins/processor/json/processor_json.go +++ b/plugins/processor/json/processor_json.go @@ -43,12 +43,12 @@ type ProcessorJSON struct { procParseInSizeBytes pipeline.CounterMetric } -const pluginName = "processor_json" +const pluginType = "processor_json" // Init called for init some system resources, like socket, mutex... func (p *ProcessorJSON) Init(context pipeline.Context) error { if p.SourceKey == "" { - return fmt.Errorf("must specify SourceKey for plugin %v", pluginName) + return fmt.Errorf("must specify SourceKey for plugin %v", pluginType) } p.context = context metricsRecord := p.context.GetMetricRecord() @@ -107,7 +107,7 @@ func (p *ProcessorJSON) shouldKeepSource(err error) bool { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorJSON{ SourceKey: "", NoKeyError: true, diff --git a/plugins/processor/otel/processor_otel_trace.go b/plugins/processor/otel/processor_otel_trace.go index c51b63f8d4..092e58de5d 100644 --- a/plugins/processor/otel/processor_otel_trace.go +++ b/plugins/processor/otel/processor_otel_trace.go @@ -38,7 +38,7 @@ type ProcessorOtelTraceParser struct { ParentSpanIDNeedDecode bool } -const pluginName = "processor_otel_trace" +const pluginType = "processor_otel_trace" func (p *ProcessorOtelTraceParser) Init(context pipeline.Context) error { p.context = context @@ -145,7 +145,7 @@ func (p *ProcessorOtelTraceParser) 
processProtoJSONTraceData(val string) ([]*pro } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorOtelTraceParser{ SourceKey: "", NoKeyError: false, diff --git a/plugins/processor/packjson/processor_packjson.go b/plugins/processor/packjson/processor_packjson.go index 3ccc529dfe..c8bc47c0f0 100644 --- a/plugins/processor/packjson/processor_packjson.go +++ b/plugins/processor/packjson/processor_packjson.go @@ -32,15 +32,15 @@ type ProcessorPackjson struct { context pipeline.Context } -const pluginName = "processor_packjson" +const pluginType = "processor_packjson" // Init called for init some system resources, like socket, mutex... func (p *ProcessorPackjson) Init(context pipeline.Context) error { if len(p.SourceKeys) == 0 { - return fmt.Errorf("must specify SourceKeys for plugin %v", pluginName) + return fmt.Errorf("must specify SourceKeys for plugin %v", pluginType) } if p.DestKey == "" { - return fmt.Errorf("must specify DestKey for plugin %v", pluginName) + return fmt.Errorf("must specify DestKey for plugin %v", pluginType) } p.keyDictionary = make(map[string]bool) for _, sourceKey := range p.SourceKeys { @@ -93,7 +93,7 @@ func (p *ProcessorPackjson) processLog(log *protocol.Log) { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorPackjson{ SourceKeys: nil, DestKey: "", diff --git a/plugins/processor/pickkey/processor_pick_key.go b/plugins/processor/pickkey/processor_pick_key.go index 035159de2e..d08daa8f66 100644 --- a/plugins/processor/pickkey/processor_pick_key.go +++ b/plugins/processor/pickkey/processor_pick_key.go @@ -22,7 +22,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" ) -const pluginName = "processor_pick_key" +const pluginType = "processor_pick_key" // ProcessorPickKey is picker to select or drop specific keys in LogContents 
type ProcessorPickKey struct { @@ -44,7 +44,7 @@ func (p *ProcessorPickKey) Init(context pipeline.Context) error { p.context = context metricsRecord := p.context.GetMetricRecord() p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, "pick_key_lost") - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginName)) + p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) if len(p.Include) > 0 { p.includeMap = make(map[string]struct{}) @@ -116,7 +116,7 @@ func (p *ProcessorPickKey) ProcessLogs(logArray []*protocol.Log) []*protocol.Log } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorPickKey{} } } diff --git a/plugins/processor/ratelimit/processor_rate_limit.go b/plugins/processor/ratelimit/processor_rate_limit.go index 873ef95bdd..8414e176da 100644 --- a/plugins/processor/ratelimit/processor_rate_limit.go +++ b/plugins/processor/ratelimit/processor_rate_limit.go @@ -33,7 +33,7 @@ type ProcessorRateLimit struct { context pipeline.Context } -const pluginName = "processor_rate_limit" +const pluginType = "processor_rate_limit" func (p *ProcessorRateLimit) Init(context pipeline.Context) error { p.context = context @@ -46,8 +46,8 @@ func (p *ProcessorRateLimit) Init(context pipeline.Context) error { p.Algorithm = newTokenBucket(limit) metricsRecord := p.context.GetMetricRecord() - p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_limited", pluginName)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginName)) + p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_limited", pluginType)) + p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) return nil } @@ -100,7 +100,7 @@ func 
(p *ProcessorRateLimit) makeKey(log *protocol.Log) string { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorRateLimit{} } } diff --git a/plugins/processor/rename/processor_rename.go b/plugins/processor/rename/processor_rename.go index fb420cadc4..60cfcbc9db 100644 --- a/plugins/processor/rename/processor_rename.go +++ b/plugins/processor/rename/processor_rename.go @@ -33,19 +33,19 @@ type ProcessorRename struct { context pipeline.Context } -const pluginName = "processor_rename" +const pluginType = "processor_rename" // Init called for init some system resources, like socket, mutex... func (p *ProcessorRename) Init(context pipeline.Context) error { p.context = context if len(p.SourceKeys) == 0 { - return fmt.Errorf("must specify SourceKeys for plugin %v", pluginName) + return fmt.Errorf("must specify SourceKeys for plugin %v", pluginType) } if len(p.DestKeys) == 0 { - return fmt.Errorf("must specify DestKeys for plugin %v", pluginName) + return fmt.Errorf("must specify DestKeys for plugin %v", pluginType) } if len(p.SourceKeys) != len(p.DestKeys) { - return fmt.Errorf("The length of SourceKeys does not match the length of DestKeys for plugin %v", pluginName) + return fmt.Errorf("The length of SourceKeys does not match the length of DestKeys for plugin %v", pluginType) } p.keyDictionary = make(map[string]string) for i, source := range p.SourceKeys { @@ -147,7 +147,7 @@ func (p *ProcessorRename) processOtherEvent(event models.PipelineEvent) { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return &ProcessorRename{ NoKeyError: false, SourceKeys: nil, diff --git a/plugins/processor/strptime/strptime.go b/plugins/processor/strptime/strptime.go index 0dccdc211b..2cb17da568 100644 --- a/plugins/processor/strptime/strptime.go +++ 
b/plugins/processor/strptime/strptime.go @@ -28,7 +28,7 @@ import ( ) const ( - pluginName = "processor_strptime" + pluginType = "processor_strptime" defaultSourceKey = "time" nilUTCOffset = 15 * 60 * 60 @@ -75,7 +75,7 @@ type Strptime struct { func (s *Strptime) Init(context pipeline.Context) error { s.context = context if len(s.Format) == 0 { - return fmt.Errorf("format can not be empty for plugin %v", pluginName) + return fmt.Errorf("format can not be empty for plugin %v", pluginType) } if !s.AdjustUTCOffset { s.UTCOffset = nilUTCOffset @@ -188,7 +188,7 @@ func newStrptime() *Strptime { } func init() { - pipeline.Processors[pluginName] = func() pipeline.Processor { + pipeline.Processors[pluginType] = func() pipeline.Processor { return newStrptime() } } diff --git a/scripts/gen_build_scripts.sh b/scripts/gen_build_scripts.sh index b4bb865ea4..d053d954f1 100755 --- a/scripts/gen_build_scripts.sh +++ b/scripts/gen_build_scripts.sh @@ -107,7 +107,7 @@ function generateCopyScript() { fi if [ $BUILD_LOGTAIL_UT = "ON" ]; then echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build core/build' >>$COPY_SCRIPT_FILE - echo 'rm -rf core/log_pb && docker cp "$id":'${PATH_IN_DOCKER}'/core/log_pb core/log_pb' >>$COPY_SCRIPT_FILE + echo 'rm -rf core/protobuf/sls && docker cp "$id":'${PATH_IN_DOCKER}'/core/protobuf/sls core/protobuf/sls' >>$COPY_SCRIPT_FILE fi else echo 'docker cp "$id":'${PATH_IN_DOCKER}'/'${OUT_DIR}'/libPluginBase.so $BINDIR' >>$COPY_SCRIPT_FILE @@ -115,7 +115,7 @@ function generateCopyScript() { echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/go_pipeline/libPluginAdapter.so $BINDIR' >>$COPY_SCRIPT_FILE if [ $BUILD_LOGTAIL_UT = "ON" ]; then echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build core/build' >>$COPY_SCRIPT_FILE - echo 'rm -rf core/log_pb && docker cp "$id":'${PATH_IN_DOCKER}'/core/log_pb core/log_pb' >>$COPY_SCRIPT_FILE + echo 'rm -rf core/protobuf/sls && docker cp "$id":'${PATH_IN_DOCKER}'/core/protobuf/sls core/protobuf/sls' 
>>$COPY_SCRIPT_FILE fi fi echo 'echo -e "{\n}" > $BINDIR/ilogtail_config.json' >>$COPY_SCRIPT_FILE diff --git a/scripts/package_license.sh b/scripts/package_license.sh index e1d471ae41..9e15bd0946 100755 --- a/scripts/package_license.sh +++ b/scripts/package_license.sh @@ -24,7 +24,7 @@ if [ "$OPERATION" = "check" ]; then fi command+=' -ignore "**/.idea/**" -ignore "**/protocol/**" -ignore "**/oldtest/**"' command+=' -ignore "**/internal/**" -ignore "**/diagnose/**" -ignore "**/external/**" -ignore "**/*.html"' -command+=' -ignore "**/core/log_pb/*.pb.*" -ignore "core/common/Version.cpp"' +command+=' -ignore "**/core/protobuf/**/*.pb.*" -ignore "core/common/Version.cpp"' command+=" $SCOPE" eval "$command" diff --git a/test/config/config.go b/test/config/config.go index c9706c2c93..398928a0bb 100644 --- a/test/config/config.go +++ b/test/config/config.go @@ -13,7 +13,13 @@ // limitations under the License. package config -import "time" +import ( + "os" + "strconv" + "time" + + "github.com/alibaba/ilogtail/pkg/logger" +) var TestConfig Config @@ -33,6 +39,7 @@ type Config struct { // SLS Project string `mapstructure:"project" yaml:"project"` Logstore string `mapstructure:"logstore" yaml:"logstore"` + MetricStore string `mapstructure:"metric_store" yaml:"metric_store"` AccessKeyID string `mapstructure:"access_key_id" yaml:"access_key_id"` AccessKeySecret string `mapstructure:"access_key_secret" yaml:"access_key_secret"` Endpoint string `mapstructure:"endpoint" yaml:"endpoint"` @@ -41,3 +48,50 @@ type Config struct { Region string `mapstructure:"region" yaml:"region"` RetryTimeout time.Duration `mapstructure:"retry_timeout" yaml:"retry_timeout"` } + +func (s *Config) GetLogstore(telemetryType string) string { + if s != nil && telemetryType == "metrics" { + return s.MetricStore + } + return s.Logstore +} + +func ParseConfig() { + loggerOptions := []logger.ConfigOption{ + logger.OptionAsyncLogger, + } + loggerOptions = append(loggerOptions, logger.OptionInfoLevel) + 
logger.InitTestLogger(loggerOptions...) + + TestConfig = Config{} + // Log + TestConfig.GeneratedLogDir = os.Getenv("GENERATED_LOG_DIR") + if len(TestConfig.GeneratedLogDir) == 0 { + TestConfig.GeneratedLogDir = "/tmp/ilogtail" + } + TestConfig.WorkDir = os.Getenv("WORK_DIR") + + // SSH + TestConfig.SSHUsername = os.Getenv("SSH_USERNAME") + TestConfig.SSHIP = os.Getenv("SSH_IP") + TestConfig.SSHPassword = os.Getenv("SSH_PASSWORD") + + // K8s + TestConfig.KubeConfigPath = os.Getenv("KUBE_CONFIG_PATH") + + // SLS + TestConfig.Project = os.Getenv("PROJECT") + TestConfig.Logstore = os.Getenv("LOGSTORE") + TestConfig.MetricStore = os.Getenv("METRIC_STORE") + TestConfig.AccessKeyID = os.Getenv("ACCESS_KEY_ID") + TestConfig.AccessKeySecret = os.Getenv("ACCESS_KEY_SECRET") + TestConfig.Endpoint = os.Getenv("ENDPOINT") + TestConfig.Aliuid = os.Getenv("ALIUID") + TestConfig.QueryEndpoint = os.Getenv("QUERY_ENDPOINT") + TestConfig.Region = os.Getenv("REGION") + timeout, err := strconv.ParseInt(os.Getenv("RETRY_TIMEOUT"), 10, 64) + if err != nil { + timeout = 60 + } + TestConfig.RetryTimeout = time.Duration(timeout) * time.Second +} diff --git a/test/config/context.go b/test/config/context.go index 685ddec9b4..ad0018d4dc 100644 --- a/test/config/context.go +++ b/test/config/context.go @@ -16,8 +16,10 @@ package config type ContextKey string const ( - StartTimeContextKey ContextKey = "startTime" - DependOnContainerKey ContextKey = "dependOnContainer" - MountVolumeKey ContextKey = "mountVolume" - ExposePortKey ContextKey = "exposePort" + StartTimeContextKey ContextKey = "startTime" + DependOnContainerKey ContextKey = "dependOnContainer" + MountVolumeKey ContextKey = "mountVolume" + ExposePortKey ContextKey = "exposePort" + CurrentWorkingDeploymentKey ContextKey = "currentWorkingDeployment" + QueryKey ContextKey = "query" ) diff --git a/test/e2e/e2e_docker_test.go b/test/e2e/e2e_docker_test.go index b4ef2ccdc5..a38ff43644 100644 --- a/test/e2e/e2e_docker_test.go +++ 
b/test/e2e/e2e_docker_test.go @@ -18,6 +18,8 @@ import ( "testing" "github.com/cucumber/godog" + + "github.com/alibaba/ilogtail/test/engine" ) func TestE2EOnDockerCompose(t *testing.T) { @@ -28,7 +30,7 @@ func TestE2EOnDockerCompose(t *testing.T) { } suite := godog.TestSuite{ Name: "E2EOnDockerCompose", - ScenarioInitializer: scenarioInitializer, + ScenarioInitializer: engine.ScenarioInitializer, Options: &godog.Options{ Format: "pretty", Paths: []string{featurePath}, @@ -45,7 +47,7 @@ func TestE2EOnDockerCompose(t *testing.T) { func TestE2EOnDockerComposeCore(t *testing.T) { suite := godog.TestSuite{ Name: "E2EOnDockerCompose", - ScenarioInitializer: scenarioInitializer, + ScenarioInitializer: engine.ScenarioInitializer, Options: &godog.Options{ Format: "pretty", Paths: []string{"test_cases"}, @@ -61,7 +63,7 @@ func TestE2EOnDockerComposeCore(t *testing.T) { func TestE2EOnDockerComposePerformance(t *testing.T) { suite := godog.TestSuite{ Name: "E2EOnDockerCompose", - ScenarioInitializer: scenarioInitializer, + ScenarioInitializer: engine.ScenarioInitializer, Options: &godog.Options{ Format: "pretty", Paths: []string{"test_cases"}, diff --git a/test/e2e/test_cases/input_canal/case.feature b/test/e2e/test_cases/input_canal/case.feature index fe2497e25a..223353782f 100644 --- a/test/e2e/test_cases/input_canal/case.feature +++ b/test/e2e/test_cases/input_canal/case.feature @@ -28,7 +28,7 @@ Feature: input canal """ """ Then there is at least {10} logs - Then the log fields match + Then the log fields match as below """ - _db_ - _gtid_ diff --git a/test/e2e/test_cases/input_canal_binfile_mode/case.feature b/test/e2e/test_cases/input_canal_binfile_mode/case.feature index 595274df9e..dbe5e93df5 100644 --- a/test/e2e/test_cases/input_canal_binfile_mode/case.feature +++ b/test/e2e/test_cases/input_canal_binfile_mode/case.feature @@ -28,7 +28,7 @@ Feature: input canal binfile mode """ """ Then there is at least {10} logs - Then the log fields match + Then the log fields 
match as below """ - _db_ - _gtid_ diff --git a/test/e2e/test_cases/input_container_stdio_multiline/case.feature b/test/e2e/test_cases/input_container_stdio_multiline/case.feature index 93486ae9b8..a2551e5af3 100644 --- a/test/e2e/test_cases/input_container_stdio_multiline/case.feature +++ b/test/e2e/test_cases/input_container_stdio_multiline/case.feature @@ -20,6 +20,7 @@ Feature: input container stdio multiline IgnoringStdout: false Multiline: StartPattern: "today" + FlushTimeoutSecs: 5 """ When start docker-compose {input_container_stdio_multiline} Then there is at least {1} logs diff --git a/test/e2e/test_cases/input_http/case.feature b/test/e2e/test_cases/input_http/case.feature index a9961cadb0..e7d5549aed 100644 --- a/test/e2e/test_cases/input_http/case.feature +++ b/test/e2e/test_cases/input_http/case.feature @@ -30,7 +30,7 @@ Feature: input http """ When start docker-compose {input_http} Then there is at least {4} logs - Then the log fields match + Then the log fields match as below """ - _method_ - _address_ diff --git a/test/e2e/test_cases/input_mock_log/case.feature b/test/e2e/test_cases/input_mock_log/case.feature index eda2812400..e4482f65ea 100644 --- a/test/e2e/test_cases/input_mock_log/case.feature +++ b/test/e2e/test_cases/input_mock_log/case.feature @@ -23,7 +23,7 @@ Feature: input mock log """ When start docker-compose {input_mock_log} Then there is at least {15} logs - Then the log fields match + Then the log fields match as below """ - tag1 - tag2 diff --git a/test/e2e/test_cases/input_mock_metric/case.feature b/test/e2e/test_cases/input_mock_metric/case.feature index 862bdbf9b6..3678af6525 100644 --- a/test/e2e/test_cases/input_mock_metric/case.feature +++ b/test/e2e/test_cases/input_mock_metric/case.feature @@ -24,14 +24,14 @@ Feature: input mock metric """ When start docker-compose {input_mock_metric} Then there is at least {15} logs - Then the log fields match + Then the log fields match as below """ - __labels__ - __time_nano__ - 
__value__ - __name__ """ - Then the log labels match + Then the log labels match as below """ - content - tag1 diff --git a/test/e2e/test_cases/input_mssql/case.feature b/test/e2e/test_cases/input_mssql/case.feature index 2126231470..94b611afff 100644 --- a/test/e2e/test_cases/input_mssql/case.feature +++ b/test/e2e/test_cases/input_mssql/case.feature @@ -31,7 +31,7 @@ Feature: input mssql Given iLogtail depends on containers {["setup"]} When start docker-compose {input_mssql} Then there is at least {4} logs - Then the log fields match + Then the log fields match as below """ - id - name diff --git a/test/e2e/test_cases/input_mysql/case.feature b/test/e2e/test_cases/input_mysql/case.feature index e034040bba..3f5dafa48c 100644 --- a/test/e2e/test_cases/input_mysql/case.feature +++ b/test/e2e/test_cases/input_mysql/case.feature @@ -31,7 +31,7 @@ Feature: input mysql Given iLogtail depends on containers {["mysql"]} When start docker-compose {input_mysql} Then there is at least {500} logs - Then the log fields match + Then the log fields match as below """ - help_keyword_id - name diff --git a/test/e2e/test_cases/input_pgsql/case.feature b/test/e2e/test_cases/input_pgsql/case.feature index 99a2508066..b28f47f1cc 100644 --- a/test/e2e/test_cases/input_pgsql/case.feature +++ b/test/e2e/test_cases/input_pgsql/case.feature @@ -31,7 +31,7 @@ Feature: input pgsql Given iLogtail depends on containers {["pgsql"]} When start docker-compose {input_pgsql} Then there is at least {10} logs - Then the log fields match + Then the log fields match as below """ - id - time diff --git a/test/e2e/test_cases/input_prometheus/case.feature b/test/e2e/test_cases/input_prometheus/case.feature index c9d9ebdf68..26482bf22c 100644 --- a/test/e2e/test_cases/input_prometheus/case.feature +++ b/test/e2e/test_cases/input_prometheus/case.feature @@ -24,7 +24,7 @@ Feature: input prometheus """ When start docker-compose {input_prometheus} Then there is at least {10} logs - Then the log fields match + 
Then the log fields match as below """ - __name__ - __labels__ diff --git a/test/engine/cleanup/cache.go b/test/engine/cleanup/cache.go index caa945eb1d..a0224ba376 100644 --- a/test/engine/cleanup/cache.go +++ b/test/engine/cleanup/cache.go @@ -21,7 +21,7 @@ import ( func GoTestCache(ctx context.Context) (context.Context, error) { command := "/usr/local/go/bin/go clean -testcache" - if err := setup.Env.ExecOnSource(command); err != nil { + if err := setup.Env.ExecOnSource(ctx, command); err != nil { return ctx, err } return ctx, nil diff --git a/test/engine/cleanup/helper.go b/test/engine/cleanup/helper.go index 39562e493e..c223800104 100644 --- a/test/engine/cleanup/helper.go +++ b/test/engine/cleanup/helper.go @@ -18,6 +18,7 @@ import ( "os" "os/signal" "syscall" + "time" "github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup" @@ -50,4 +51,5 @@ func All() { if subscriber.TestSubscriber != nil { _ = subscriber.TestSubscriber.Stop() } + time.Sleep(5 * time.Second) } diff --git a/test/engine/cleanup/log.go b/test/engine/cleanup/log.go index 403321a02d..a1a701c094 100644 --- a/test/engine/cleanup/log.go +++ b/test/engine/cleanup/log.go @@ -23,7 +23,7 @@ import ( func AllGeneratedLog(ctx context.Context) (context.Context, error) { command := fmt.Sprintf("rm -rf %s/*", config.TestConfig.GeneratedLogDir) - if err := setup.Env.ExecOnSource(command); err != nil { + if err := setup.Env.ExecOnSource(ctx, command); err != nil { return ctx, err } return ctx, nil diff --git a/test/engine/control/config.go b/test/engine/control/config.go index 6dbfadc8c6..ae32887940 100644 --- a/test/engine/control/config.go +++ b/test/engine/control/config.go @@ -22,6 +22,7 @@ import ( "net/http" "os" "strings" + "time" global_config "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/logger" @@ -58,6 +59,7 @@ func AddLocalConfig(ctx context.Context, configName, c string) (context.Context, if err := 
setup.Env.ExecOnLogtail(command); err != nil { return ctx, err } + time.Sleep(5 * time.Second) } return ctx, nil } diff --git a/test/engine/control/query.go b/test/engine/control/query.go new file mode 100644 index 0000000000..2d2ea1a6df --- /dev/null +++ b/test/engine/control/query.go @@ -0,0 +1,19 @@ +package control + +import ( + "context" + + "github.com/alibaba/ilogtail/test/config" +) + +func SetQuery(ctx context.Context, sql string) (context.Context, error) { + return context.WithValue(ctx, config.QueryKey, sql), nil +} + +func GetQuery(ctx context.Context) string { + value := ctx.Value(config.QueryKey) + if value == nil { + return "*" + } + return value.(string) +} diff --git a/test/engine/setup/controller/kubernetes.go b/test/engine/setup/controller/kubernetes.go index b405d2f78b..f17ed7e72a 100644 --- a/test/engine/setup/controller/kubernetes.go +++ b/test/engine/setup/controller/kubernetes.go @@ -48,14 +48,24 @@ func (c *DeploymentController) GetDeploymentPods(deploymentName, deploymentNames if err != nil { return nil, err } - selector := metav1.FormatLabelSelector(deployment.Spec.Selector) + labels := map[string]string{ + "app": deployment.Spec.Template.Labels["app"], + } + selector := metav1.FormatLabelSelector(&metav1.LabelSelector{MatchLabels: labels}) listOptions := metav1.ListOptions{LabelSelector: selector} pods, err := c.k8sClient.CoreV1().Pods(deploymentNamespace).List(context.TODO(), listOptions) if err != nil { return nil, err } - // Only return running pods, terminating pods will be excluded + return pods, nil +} + +func (c *DeploymentController) GetRunningDeploymentPods(deploymentName, deploymentNamespace string) (*corev1.PodList, error) { + pods, err := c.GetDeploymentPods(deploymentName, deploymentNamespace) + if err != nil { + return nil, err + } runningPods := make([]corev1.Pod, 0) for _, pod := range pods.Items { if pod.DeletionTimestamp == nil { @@ -142,10 +152,25 @@ func (c *DeploymentController) 
waitDeploymentAvailable(deploymentName, deploymen if err != nil { return err } - if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && pods != nil && len(pods.Items) == int(*deployment.Spec.Replicas) { - return nil + if !(deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && pods != nil && len(pods.Items) == int(*deployment.Spec.Replicas)) { + return fmt.Errorf("deployment %s/%s not available yet", deploymentNamespace, deploymentName) + } + for _, pod := range pods.Items { + deployment.Spec.Template.Labels["pod-template-hash"] = pod.Labels["pod-template-hash"] + fmt.Println(pod.Name, pod.Labels, deployment.Spec.Template.Labels) + if len(deployment.Spec.Template.Labels) != len(pod.Labels) { + return fmt.Errorf("pod %s/%s not match labels", pod.Namespace, pod.Name) + } + if pod.Status.Phase != corev1.PodRunning { + return fmt.Errorf("pod %s/%s not running yet", pod.Namespace, pod.Name) + } + for label, value := range deployment.Spec.Template.Labels { + if v, ok := pod.Labels[label]; !ok || v != value { + return fmt.Errorf("pod %s/%s not match label %s=%s", pod.Namespace, pod.Name, label, value) + } + } } - return fmt.Errorf("deployment %s/%s not available yet", deploymentNamespace, deploymentName) + return nil }, retry.Context(timeoutCtx), retry.Delay(5*time.Second), diff --git a/test/engine/setup/docker_compose.go b/test/engine/setup/docker_compose.go index 202a4c6230..10c2e3439e 100644 --- a/test/engine/setup/docker_compose.go +++ b/test/engine/setup/docker_compose.go @@ -113,6 +113,6 @@ func (d *DockerComposeEnv) ExecOnLogtail(command string) error { return fmt.Errorf("not implemented") } -func (d *DockerComposeEnv) ExecOnSource(command string) error { +func (d *DockerComposeEnv) ExecOnSource(ctx context.Context, command string) error { return fmt.Errorf("not implemented") } diff --git a/test/engine/setup/dockercompose/compose.go b/test/engine/setup/dockercompose/compose.go index 76e4b7000f..fda303e3f2 100644 --- 
a/test/engine/setup/dockercompose/compose.go +++ b/test/engine/setup/dockercompose/compose.go @@ -113,11 +113,23 @@ func (c *ComposeBooter) Start(ctx context.Context) error { projectName = fmt.Sprintf("%x", hasher.Sum(nil)) compose := testcontainers.NewLocalDockerCompose([]string{config.CaseHome + finalFileName}, projectName).WithCommand([]string{"up", "-d", "--build"}) strategyWrappers := withExposedService(compose) - execError := compose.Invoke() - if execError.Error != nil { - logger.Error(context.Background(), "START_DOCKER_COMPOSE_ERROR", - "stdout", execError.Error.Error()) - return execError.Error + // retry 3 times + for i := 0; i < 3; i++ { + execError := compose.Invoke() + if execError.Error == nil { + break + } + if i == 2 { + logger.Error(context.Background(), "START_DOCKER_COMPOSE_ERROR", + "stdout", execError.Error.Error()) + return execError.Error + } + execError = testcontainers.NewLocalDockerCompose([]string{config.CaseHome + finalFileName}, projectName).Down() + if execError.Error != nil { + logger.Error(context.Background(), "DOWN_DOCKER_COMPOSE_ERROR", + "stdout", execError.Error.Error()) + return execError.Error + } } cli, err := CreateDockerClient() if err != nil { diff --git a/test/engine/setup/env.go b/test/engine/setup/env.go index ebc2dd47f9..75a60cc595 100644 --- a/test/engine/setup/env.go +++ b/test/engine/setup/env.go @@ -13,12 +13,16 @@ // limitations under the License. 
package setup +import ( + "context" +) + var Env TestEnv type TestEnv interface { GetType() string ExecOnLogtail(command string) error - ExecOnSource(command string) error + ExecOnSource(ctx context.Context, command string) error } func InitEnv(envType string) { @@ -29,5 +33,12 @@ func InitEnv(envType string) { Env = NewDaemonSetEnv() case "docker-compose": Env = NewDockerComposeEnv() + case "deployment": + Env = NewDeploymentEnv() } } + +func Mkdir(ctx context.Context, dir string) (context.Context, error) { + command := "mkdir -p " + dir + return ctx, Env.ExecOnSource(ctx, command) +} diff --git a/test/engine/setup/host.go b/test/engine/setup/host.go index 87095b8435..ea6fe500a8 100644 --- a/test/engine/setup/host.go +++ b/test/engine/setup/host.go @@ -42,7 +42,7 @@ func (h *HostEnv) ExecOnLogtail(command string) error { return h.exec(command) } -func (h *HostEnv) ExecOnSource(command string) error { +func (h *HostEnv) ExecOnSource(ctx context.Context, command string) error { return h.exec(command) } diff --git a/test/engine/setup/k8s.go b/test/engine/setup/k8s.go index 94e5f408a8..7ee1c61aa9 100644 --- a/test/engine/setup/k8s.go +++ b/test/engine/setup/k8s.go @@ -15,6 +15,7 @@ package setup import ( "bytes" + "context" "crypto/rand" "fmt" "math/big" @@ -46,6 +47,14 @@ func NewDaemonSetEnv() *K8sEnv { return env } +func NewDeploymentEnv() *K8sEnv { + env := &K8sEnv{ + deployType: "deployment", + } + env.init() + return env +} + func (k *K8sEnv) GetType() string { return k.deployType } @@ -54,9 +63,18 @@ func (k *K8sEnv) ExecOnLogtail(command string) error { if k.k8sClient == nil { return fmt.Errorf("k8s client init failed") } - pods, err := k.daemonsetController.GetDaemonSetPods("logtail-ds", "kube-system") - if err != nil { - return err + var pods *corev1.PodList + var err error + if k.deployType == "daemonset" { + pods, err = k.daemonsetController.GetDaemonSetPods("logtail-ds", "kube-system") + if err != nil { + return err + } + } else if k.deployType == 
"deployment" { + pods, err = k.deploymentController.GetRunningDeploymentPods("cluster-agent", "loong-collector") + if err != nil { + return err + } } for _, pod := range pods.Items { if err := k.execInPod(k.config, pod.Namespace, pod.Name, pod.Spec.Containers[0].Name, []string{"bash", "-c", command}); err != nil { @@ -66,11 +84,15 @@ func (k *K8sEnv) ExecOnLogtail(command string) error { return nil } -func (k *K8sEnv) ExecOnSource(command string) error { +func (k *K8sEnv) ExecOnSource(ctx context.Context, command string) error { if k.k8sClient == nil { return fmt.Errorf("k8s client init failed") } - pods, err := k.deploymentController.GetDeploymentPods("e2e-generator", "default") + deploymentName := "e2e-generator" + if ctx.Value(config.CurrentWorkingDeploymentKey) != nil { + deploymentName = ctx.Value(config.CurrentWorkingDeploymentKey).(string) + } + pods, err := k.deploymentController.GetRunningDeploymentPods(deploymentName, "default") if err != nil { return err } @@ -79,6 +101,7 @@ func (k *K8sEnv) ExecOnSource(command string) error { return err } pod := pods.Items[randomIndex.Int64()] + fmt.Println("exec on pod: ", pod.Name) if err := k.execInPod(k.config, pod.Namespace, pod.Name, pod.Spec.Containers[0].Name, []string{"sh", "-c", command}); err != nil { return err } @@ -93,6 +116,10 @@ func (k *K8sEnv) RemoveFilter(filter controller.ContainerFilter) error { return k.deploymentController.RemoveFilter("e2e-generator", filter) } +func SwitchCurrentWorkingDeployment(ctx context.Context, deploymentName string) (context.Context, error) { + return context.WithValue(ctx, config.CurrentWorkingDeploymentKey, deploymentName), nil +} + func (k *K8sEnv) init() { var c *rest.Config var err error diff --git a/test/engine/setup/subscriber/clickhouse.go b/test/engine/setup/subscriber/clickhouse.go index c9abb492dd..1d7b1b43e5 100644 --- a/test/engine/setup/subscriber/clickhouse.go +++ b/test/engine/setup/subscriber/clickhouse.go @@ -55,7 +55,7 @@ func (i *ClickHouseSubscriber) 
Description() string { return "this's a clickhouse subscriber, which will query inserted records from clickhouse periodically." } -func (i *ClickHouseSubscriber) GetData(startTime int32) ([]*protocol.LogGroup, error) { +func (i *ClickHouseSubscriber) GetData(sql string, startTime int32) ([]*protocol.LogGroup, error) { host, err := TryReplacePhysicalAddress(i.Address) if err != nil { return nil, err diff --git a/test/engine/setup/subscriber/elasticsearch.go b/test/engine/setup/subscriber/elasticsearch.go index 1e5efdedf7..f814d9ea65 100644 --- a/test/engine/setup/subscriber/elasticsearch.go +++ b/test/engine/setup/subscriber/elasticsearch.go @@ -53,7 +53,7 @@ func (i *ElasticSearchSubscriber) Description() string { return "this's a elasticsearch subscriber, which will query inserted records from elasticsearch periodically." } -func (i *ElasticSearchSubscriber) GetData(startTime int32) ([]*protocol.LogGroup, error) { +func (i *ElasticSearchSubscriber) GetData(sql string, startTime int32) ([]*protocol.LogGroup, error) { host, err := TryReplacePhysicalAddress(i.Address) if err != nil { return nil, err diff --git a/test/engine/setup/subscriber/grpc.go b/test/engine/setup/subscriber/grpc.go index 2c27c6c1a9..e05137255b 100644 --- a/test/engine/setup/subscriber/grpc.go +++ b/test/engine/setup/subscriber/grpc.go @@ -52,7 +52,7 @@ type GRPCService struct { protocol.UnimplementedLogReportServiceServer } -func (g *GrpcSubscriber) GetData(int32) ([]*protocol.LogGroup, error) { +func (g *GrpcSubscriber) GetData(string, int32) ([]*protocol.LogGroup, error) { for { select { case logGroup, ok := <-g.channel: diff --git a/test/engine/setup/subscriber/influxdb.go b/test/engine/setup/subscriber/influxdb.go index e0f7e45463..f88393812d 100644 --- a/test/engine/setup/subscriber/influxdb.go +++ b/test/engine/setup/subscriber/influxdb.go @@ -55,7 +55,7 @@ func (i *InfluxdbSubscriber) Description() string { return "this's a influxdb subscriber, which will query inserted records from 
influxdb periodically." } -func (i *InfluxdbSubscriber) GetData(startTime int32) ([]*protocol.LogGroup, error) { +func (i *InfluxdbSubscriber) GetData(_ string, startTime int32) ([]*protocol.LogGroup, error) { host, err := TryReplacePhysicalAddress(i.DbHost) if err != nil { return nil, err diff --git a/test/engine/setup/subscriber/loki.go b/test/engine/setup/subscriber/loki.go index 191bcc450f..702d13e8c7 100644 --- a/test/engine/setup/subscriber/loki.go +++ b/test/engine/setup/subscriber/loki.go @@ -76,7 +76,7 @@ func (l *LokiSubscriber) Description() string { return "this a loki subscriber, which is the default mock backend for Ilogtail." } -func (l *LokiSubscriber) GetData(startTime int32) ([]*protocol.LogGroup, error) { +func (l *LokiSubscriber) GetData(sql string, startTime int32) ([]*protocol.LogGroup, error) { host, err := TryReplacePhysicalAddress(l.Address) if err != nil { return nil, err diff --git a/test/engine/setup/subscriber/sls.go b/test/engine/setup/subscriber/sls.go index ed08b73412..9d05c56cc6 100644 --- a/test/engine/setup/subscriber/sls.go +++ b/test/engine/setup/subscriber/sls.go @@ -3,7 +3,6 @@ package subscriber import ( "fmt" "strings" - "sync" "text/template" "time" @@ -21,19 +20,15 @@ const SLSFlusherConfigTemplate = ` flushers: - Type: flusher_sls Aliuid: "{{.Aliuid}}" - TelemetryType: "logs" + TelemetryType: "{{.TelemetryType}}" Region: {{.Region}} Endpoint: {{.Endpoint}} Project: {{.Project}} Logstore: {{.Logstore}}` -var SLSFlusherConfig string -var SLSFlusherConfigOnce sync.Once - -const queryCountSQL = "* | SELECT * FROM log WHERE from_unixtime(__time__) >= from_unixtime(%v) AND from_unixtime(__time__) < now()" - type SLSSubscriber struct { - client *sls.Client + client *sls.Client + TelemetryType string } func (s *SLSSubscriber) Name() string { @@ -44,8 +39,9 @@ func (s *SLSSubscriber) Description() string { return "this a sls subscriber" } -func (s *SLSSubscriber) GetData(startTime int32) ([]*protocol.LogGroup, error) { - resp, err 
:= s.getLogFromSLS(fmt.Sprintf(queryCountSQL, startTime), startTime) +func (s *SLSSubscriber) GetData(query string, startTime int32) ([]*protocol.LogGroup, error) { + query = s.getCompleteQuery(query) + resp, err := s.getLogFromSLS(query, startTime) if err != nil { return nil, err } @@ -66,41 +62,57 @@ func (s *SLSSubscriber) GetData(startTime int32) ([]*protocol.LogGroup, error) { } func (s *SLSSubscriber) FlusherConfig() string { - SLSFlusherConfigOnce.Do(func() { - tpl := template.Must(template.New("slsFlusherConfig").Parse(SLSFlusherConfigTemplate)) - var builder strings.Builder - _ = tpl.Execute(&builder, map[string]interface{}{ - "Aliuid": config.TestConfig.Aliuid, - "Region": config.TestConfig.Region, - "Endpoint": config.TestConfig.Endpoint, - "Project": config.TestConfig.Project, - "Logstore": config.TestConfig.Logstore, - }) - SLSFlusherConfig = builder.String() + tpl := template.Must(template.New("slsFlusherConfig").Parse(SLSFlusherConfigTemplate)) + var builder strings.Builder + _ = tpl.Execute(&builder, map[string]interface{}{ + "Aliuid": config.TestConfig.Aliuid, + "Region": config.TestConfig.Region, + "Endpoint": config.TestConfig.Endpoint, + "Project": config.TestConfig.Project, + "Logstore": config.TestConfig.GetLogstore(s.TelemetryType), + "TelemetryType": s.TelemetryType, }) - return SLSFlusherConfig + config := builder.String() + return config } func (s *SLSSubscriber) Stop() error { return nil } +func (s *SLSSubscriber) getCompleteQuery(query string) string { + if query == "" { + return "*" + } + switch s.TelemetryType { + case "logs": + return query + case "metrics": + return fmt.Sprintf("* | select promql_query_range('%s') from metrics limit 10000", query) + case "traces": + return query + default: + return query + } +} + func (s *SLSSubscriber) getLogFromSLS(sql string, from int32) (*sls.GetLogsResponse, error) { now := int32(time.Now().Unix()) if now == from { now++ } + fmt.Println("get logs from sls with sql", sql, "from", from, "to", now, 
"in", config.TestConfig.GetLogstore(s.TelemetryType)) req := &sls.GetLogsRequest{ Query: tea.String(sql), From: tea.Int32(from), To: tea.Int32(now), } - resp, err := s.client.GetLogs(tea.String(config.TestConfig.Project), tea.String(config.TestConfig.Logstore), req) + resp, err := s.client.GetLogs(tea.String(config.TestConfig.Project), tea.String(config.TestConfig.GetLogstore(s.TelemetryType)), req) if err != nil { return nil, err } if len(resp.Body) == 0 { - return nil, fmt.Errorf("failed to get logs with sql %s, no log", sql) + return nil, fmt.Errorf("failed to get logs with sql %s from %v, no log", sql, from) } return resp, nil } @@ -117,10 +129,16 @@ func createSLSClient(accessKeyID, accessKeySecret, endpoint string) *sls.Client func init() { RegisterCreator(slsName, func(spec map[string]interface{}) (Subscriber, error) { + telemetryType := "logs" + if v, ok := spec["telemetry_type"]; ok { + telemetryType = v.(string) + } + fmt.Println("create sls subscriber with telemetry type", telemetryType) l := &SLSSubscriber{ - client: createSLSClient(config.TestConfig.AccessKeyID, config.TestConfig.AccessKeySecret, config.TestConfig.QueryEndpoint), + client: createSLSClient(config.TestConfig.AccessKeyID, config.TestConfig.AccessKeySecret, config.TestConfig.QueryEndpoint), + TelemetryType: telemetryType, } return l, nil }) - doc.Register("subscriber", lokiName, new(LokiSubscriber)) + doc.Register("subscriber", slsName, new(SLSSubscriber)) } diff --git a/test/engine/setup/subscriber/subscriber.go b/test/engine/setup/subscriber/subscriber.go index dbf438a510..62ea6bca32 100644 --- a/test/engine/setup/subscriber/subscriber.go +++ b/test/engine/setup/subscriber/subscriber.go @@ -44,7 +44,7 @@ type Subscriber interface { // Stop Stop() error // Get data - GetData(startTime int32) ([]*protocol.LogGroup, error) + GetData(sql string, startTime int32) ([]*protocol.LogGroup, error) // FlusherConfig returns the default flusher config for Ilogtail container to transfer the received 
or self telemetry data. FlusherConfig() string } diff --git a/test/e2e/main_test.go b/test/engine/steps.go similarity index 50% rename from test/e2e/main_test.go rename to test/engine/steps.go index 36dfb2516d..a104af2651 100644 --- a/test/e2e/main_test.go +++ b/test/engine/steps.go @@ -1,28 +1,11 @@ -// Copyright 2024 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package e2e +package engine import ( "context" - "os" - "strconv" - "testing" "time" "github.com/cucumber/godog" - "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/test/config" "github.com/alibaba/ilogtail/test/engine/cleanup" "github.com/alibaba/ilogtail/test/engine/control" @@ -32,49 +15,7 @@ import ( "github.com/alibaba/ilogtail/test/engine/verify" ) -func TestMain(m *testing.M) { - loggerOptions := []logger.ConfigOption{ - logger.OptionAsyncLogger, - } - loggerOptions = append(loggerOptions, logger.OptionInfoLevel) - logger.InitTestLogger(loggerOptions...) 
- - config.TestConfig = config.Config{} - // Log - config.TestConfig.GeneratedLogDir = os.Getenv("GENERATED_LOG_DIR") - if len(config.TestConfig.GeneratedLogDir) == 0 { - config.TestConfig.GeneratedLogDir = "/tmp/ilogtail" - } - config.TestConfig.WorkDir = os.Getenv("WORK_DIR") - - // SSH - config.TestConfig.SSHUsername = os.Getenv("SSH_USERNAME") - config.TestConfig.SSHIP = os.Getenv("SSH_IP") - config.TestConfig.SSHPassword = os.Getenv("SSH_PASSWORD") - - // K8s - config.TestConfig.KubeConfigPath = os.Getenv("KUBE_CONFIG_PATH") - - // SLS - config.TestConfig.Project = os.Getenv("PROJECT") - config.TestConfig.Logstore = os.Getenv("LOGSTORE") - config.TestConfig.AccessKeyID = os.Getenv("ACCESS_KEY_ID") - config.TestConfig.AccessKeySecret = os.Getenv("ACCESS_KEY_SECRET") - config.TestConfig.Endpoint = os.Getenv("ENDPOINT") - config.TestConfig.Aliuid = os.Getenv("ALIUID") - config.TestConfig.QueryEndpoint = os.Getenv("QUERY_ENDPOINT") - config.TestConfig.Region = os.Getenv("REGION") - timeout, err := strconv.ParseInt(os.Getenv("RETRY_TIMEOUT"), 10, 64) - if err != nil { - timeout = 60 - } - config.TestConfig.RetryTimeout = time.Duration(timeout) * time.Second - code := m.Run() - logger.Flush() - os.Exit(code) -} - -func scenarioInitializer(ctx *godog.ScenarioContext) { +func ScenarioInitializer(ctx *godog.ScenarioContext) { // Given ctx.Given(`^\{(\S+)\} environment$`, setup.InitEnv) ctx.Given(`^iLogtail depends on containers \{(.*)\}`, setup.SetDockerComposeDependOn) @@ -84,32 +25,41 @@ func scenarioInitializer(ctx *godog.ScenarioContext) { ctx.Given(`^\{(.*)\} http config as below`, control.AddHTTPConfig) ctx.Given(`^remove http config \{(.*)\}`, control.RemoveHTTPConfig) ctx.Given(`^subcribe data from \{(\S+)\} with config`, subscriber.InitSubscriber) + ctx.Given(`^mkdir \{(.*)\}`, setup.Mkdir) // When - ctx.When(`^generate \{(\d+)\} regex logs, with interval \{(\d+)\}ms$`, trigger.RegexSingle) + ctx.When(`^generate \{(\d+)\} regex logs to file \{(.*)\}, with 
interval \{(\d+)\}ms$`, trigger.RegexSingle) + ctx.When(`^generate \{(\d+)\} regex gbk logs to file \{(.*)\}, with interval \{(\d+)\}ms$`, trigger.RegexSingleGBK) ctx.When(`^generate \{(\d+)\} http logs, with interval \{(\d+)\}ms, url: \{(.*)\}, method: \{(.*)\}, body:`, trigger.HTTP) ctx.When(`^add k8s label \{(.*)\}`, control.AddLabel) ctx.When(`^remove k8s label \{(.*)\}`, control.RemoveLabel) ctx.When(`^start docker-compose \{(\S+)\}`, setup.StartDockerComposeEnv) + ctx.When(`^switch working on deployment \{(.*)\}`, setup.SwitchCurrentWorkingDeployment) + ctx.When(`^generate \{(\d+)\} apsara logs to file \{(.*)\}, with interval \{(\d+)\}ms$`, trigger.Apsara) + ctx.When(`^generate \{(\d+)\} delimiter logs to file \{(.*)\}, with interval \{(\d+)\}ms$`, trigger.DelimiterSingle) + ctx.When(`^query through \{(.*)\}`, control.SetQuery) // Then ctx.Then(`^there is \{(\d+)\} logs$`, verify.LogCount) + ctx.Then(`^there is more than \{(\d+)\} metrics in \{(\d+)\} seconds$`, verify.MetricCount) ctx.Then(`^there is at least \{(\d+)\} logs$`, verify.LogCountAtLeast) ctx.Then(`^there is at least \{(\d+)\} logs with filter key \{(.*)\} value \{(.*)\}$`, verify.LogCountAtLeastWithFilter) - ctx.Then(`^the log fields match regex single`, verify.RegexSingle) ctx.Then(`^the log fields match kv`, verify.LogFieldKV) ctx.Then(`^the log tags match kv`, verify.TagKV) ctx.Then(`^the context of log is valid$`, verify.LogContext) - ctx.Then(`^the log fields match`, verify.LogField) - ctx.Then(`^the log labels match`, verify.LogLabel) + ctx.Then(`^the log fields match as below`, verify.LogField) + ctx.Then(`^the log labels match as below`, verify.LogLabel) ctx.Then(`^the logtail log contains \{(\d+)\} times of \{(.*)\}$`, verify.LogtailPluginLog) ctx.Then(`wait \{(\d+)\} seconds`, func(ctx context.Context, t int) context.Context { time.Sleep(time.Duration(t) * time.Second) return ctx }) + // special pattern logs + ctx.Then(`^the log fields match regex single`, verify.RegexSingle) + 
ctx.Then(`^the log fields match apsara`, verify.Apsara) - // Cleanup ctx.Before(func(ctx context.Context, sc *godog.Scenario) (context.Context, error) { + config.ParseConfig() cleanup.HandleSignal() return ctx, nil }) diff --git a/test/engine/trigger/apsara_test.go b/test/engine/trigger/apsara_test.go new file mode 100644 index 0000000000..8efb323d23 --- /dev/null +++ b/test/engine/trigger/apsara_test.go @@ -0,0 +1,74 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package trigger + +import ( + "fmt" + "io" + "os" + "strconv" + "testing" + "time" +) + +// TestGenerateApsara will be executed in the environment being collected. 
+func TestGenerateApsara(t *testing.T) { + gneratedLogDir := getEnvOrDefault("GENERATED_LOG_DIR", "/tmp/ilogtail") + totalLog, err := strconv.Atoi(getEnvOrDefault("TOTAL_LOG", "100")) + if err != nil { + t.Fatalf("parse TOTAL_LOG failed: %v", err) + return + } + interval, err := strconv.Atoi(getEnvOrDefault("INTERVAL", "1")) + if err != nil { + t.Fatalf("parse INTERVAL failed: %v", err) + return + } + fileName := getEnvOrDefault("FILENAME", "apsara.log") + + testLogConent := []string{ + "[%s]\t[ERROR]\t[32337]\t[/build/core/application/Application:12]\tfile:file0\tlogNo:1199997\tmark:-\tmsg:hello world!", + "[%s]\t[ERROR]\t[20964]\t[/build/core/ilogtail.cpp:127]\tfile:file0\tlogNo:1199998\tmark:F\tmsg:这是一条消息", + "[%s]\t[WARNING]\t[32337]\t[/build/core/ilogtail.cpp:127]\tfile:file0\tlogNo:1199999\tmark:-\tmsg:hello world!", + "[%s]\t[INFO]\t[32337]\t[/build/core/ilogtail.cpp:127]\tfile:file0\tlogNo:1200000\tmark:-\tmsg:这是一条消息", + "[%s]\t[ERROR]\t[00001]\t[/build/core/ilogtail.cpp:127]\tfile:file0\tlogNo:1199992\tmark:-\tmsg:password:123456", + "[%s]\t[DEBUG]\t[32337]\t[/build/core/ilogtail.cpp:127]\tfile:file0\tlogNo:1199993\tmark:-\tmsg:hello world!", + } + file, err := os.OpenFile(fmt.Sprintf("%s/%s", gneratedLogDir, fileName), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + t.Fatalf("open file failed: %v", err) + return + } + defer file.Close() + + logIndex := 0 + for i := 0; i < totalLog; i++ { + var currentTime string + if i%2 == 0 { + currentTime = time.Now().Format("2006-01-02 15:04:05.000000") + } else { + currentTime = strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + _, err := io.WriteString(file, fmt.Sprintf(testLogConent[logIndex]+"\n", currentTime)) + if err != nil { + t.Fatalf("write log failed: %v", err) + return + } + time.Sleep(time.Duration(interval * int(time.Millisecond))) + logIndex++ + if logIndex >= len(testLogConent) { + logIndex = 0 + } + } +} diff --git a/test/engine/trigger/delimiter_test.go 
b/test/engine/trigger/delimiter_test.go new file mode 100644 index 0000000000..321056886e --- /dev/null +++ b/test/engine/trigger/delimiter_test.go @@ -0,0 +1,68 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package trigger + +import ( + "fmt" + "io" + "os" + "strconv" + "testing" + "time" +) + +// TestGenerateDelimiterSingle will be executed in the environment being collected. +func TestGenerateDelimiterSingle(t *testing.T) { + gneratedLogDir := getEnvOrDefault("GENERATED_LOG_DIR", "/tmp/ilogtail") + totalLog, err := strconv.Atoi(getEnvOrDefault("TOTAL_LOG", "100")) + if err != nil { + t.Fatalf("parse TOTAL_LOG failed: %v", err) + return + } + interval, err := strconv.Atoi(getEnvOrDefault("INTERVAL", "1")) + if err != nil { + t.Fatalf("parse INTERVAL failed: %v", err) + return + } + fileName := getEnvOrDefault("FILENAME", "apsara.log") + + testLogConent := []string{ + "'-' 'file0' '13196' '0.0.0.0' '%s' 'GET' '/index.html' 'HTTP/2.0' '302' '628' 'curl/7.10'", + "'-' 'file0' '13197' '10.45.26.0' '%s' 'GET' '/' 'HTTP/2.0' '302' '218' 'go-sdk'", + "'-' 'file0' '13198' '10.45.26.0' '%s' 'GET' '/dir/resource.txt' 'HTTP/1.1' '404' '744' 'Mozilla/5.0'", + "'-' 'file0' '13199' '127.0.0.1' '%s' 'PUT' '/' 'HTTP/2.0' '200' '320' 'curl/7.10'", + "'-' 'file0' '13200' '192.168.0.3' '%s' 'PUT' '/dir/resource.txt' 'HTTP/1.1' '404' '949' 'curl/7.10'", + } + file, err := os.OpenFile(fmt.Sprintf("%s/%s", gneratedLogDir, fileName), 
os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + t.Fatalf("open file failed: %v", err) + return + } + defer file.Close() + + logIndex := 0 + for i := 0; i < totalLog; i++ { + currentTime := time.Now().Format("2006-01-02 15:04:05.000000000") + _, err := io.WriteString(file, fmt.Sprintf(testLogConent[logIndex]+"\n", currentTime)) + if err != nil { + t.Fatalf("write log failed: %v", err) + return + } + time.Sleep(time.Duration(interval * int(time.Millisecond))) + logIndex++ + if logIndex >= len(testLogConent) { + logIndex = 0 + } + } +} diff --git a/test/engine/trigger/helper.go b/test/engine/trigger/helper.go index 5921a279f0..6817e07bfd 100644 --- a/test/engine/trigger/helper.go +++ b/test/engine/trigger/helper.go @@ -18,7 +18,7 @@ import ( "os" ) -const commandTemplate = "/usr/local/go/bin/go test -v -run ^%s$ github.com/alibaba/ilogtail/test/engine/trigger" +const commandTemplate = "/usr/local/go/bin/go test -count=1 -v -run ^%s$ github.com/alibaba/ilogtail/test/engine/trigger" func getEnvOrDefault(env, fallback string) string { if value, ok := os.LookupEnv(env); ok { diff --git a/test/engine/trigger/regex.go b/test/engine/trigger/regex.go deleted file mode 100644 index bc349ebb38..0000000000 --- a/test/engine/trigger/regex.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2024 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package trigger - -import ( - "context" - "html/template" - "strings" - "time" - - "github.com/alibaba/ilogtail/test/config" - "github.com/alibaba/ilogtail/test/engine/setup" -) - -const triggerRegexTemplate = "cd {{.WorkDir}} && TOTAL_LOG={{.TotalLog}} INTERVAL={{.Interval}} FILENAME={{.Filename}} {{.Command}}" - -func RegexSingle(ctx context.Context, totalLog, interval int) (context.Context, error) { - command := getRunTriggerCommand("TestGenerateRegexLogSingle") - var triggerRegexCommand strings.Builder - template := template.Must(template.New("triggerRegexSingle").Parse(triggerRegexTemplate)) - if err := template.Execute(&triggerRegexCommand, map[string]interface{}{ - "WorkDir": config.TestConfig.WorkDir, - "TotalLog": totalLog, - "Interval": interval, - "Filename": "regex_single.log", - "Command": command, - }); err != nil { - return ctx, err - } - startTime := time.Now().Unix() - if err := setup.Env.ExecOnSource(triggerRegexCommand.String()); err != nil { - return ctx, err - } - return context.WithValue(ctx, config.StartTimeContextKey, int32(startTime)), nil -} diff --git a/test/engine/trigger/regex_test.go b/test/engine/trigger/regex_test.go index 119308f5d6..ff7c998068 100644 --- a/test/engine/trigger/regex_test.go +++ b/test/engine/trigger/regex_test.go @@ -14,12 +14,16 @@ package trigger import ( + "bytes" "fmt" "io" "os" "strconv" "testing" "time" + + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/transform" ) // TestGenerateRegexLogSingle will be executed in the environment being collected. @@ -64,3 +68,55 @@ func TestGenerateRegexLogSingle(t *testing.T) { } } } + +// TestGenerateRegexLogSingleGBK will be executed in the environment being collected. 
+func TestGenerateRegexLogSingleGBK(t *testing.T) { + gneratedLogDir := getEnvOrDefault("GENERATED_LOG_DIR", "/tmp/ilogtail") + totalLog, err := strconv.Atoi(getEnvOrDefault("TOTAL_LOG", "100")) + if err != nil { + t.Fatalf("parse TOTAL_LOG failed: %v", err) + return + } + interval, err := strconv.Atoi(getEnvOrDefault("INTERVAL", "1")) + if err != nil { + t.Fatalf("parse INTERVAL failed: %v", err) + return + } + fileName := getEnvOrDefault("FILENAME", "regex_single.log") + + encoder := simplifiedchinese.GBK.NewEncoder() + testLogConentUTF8 := []string{ + `- file2:1 127.0.0.1 - [2024-01-07T12:40:10.505120] "HEAD / HTTP/2.0" 302 809 "未知" "这是一条消息,password:123456"`, + `- file2:2 127.0.0.1 - [2024-01-07T12:40:11.392101] "GET /index.html HTTP/2.0" 200 139 "Mozilla/5.0" "这是一条消息,password:123456,这是第二条消息,password:00000"`, + `- file2:3 10.45.26.0 - [2024-01-07T12:40:12.359314] "PUT /index.html HTTP/1.1" 200 913 "curl/7.10" "这是一条消息"`, + `- file2:4 192.168.0.3 - [2024-01-07T12:40:13.002661] "PUT /dir/resource.txt HTTP/2.0" 501 355 "go-sdk" "这是一条消息,password:123456"`, + } + testLogConent := make([]string, 0, len(testLogConentUTF8)) + for _, log := range testLogConentUTF8 { + data, err1 := io.ReadAll(transform.NewReader(bytes.NewBuffer([]byte(log)), encoder)) + if err1 != nil { + t.Fatalf("encode log failed: %v", err1) + } + testLogConent = append(testLogConent, string(data)) + } + file, err := os.OpenFile(fmt.Sprintf("%s/%s", gneratedLogDir, fileName), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + t.Fatalf("open file failed: %v", err) + return + } + defer file.Close() + + logIndex := 0 + for i := 0; i < totalLog; i++ { + _, err := io.WriteString(file, testLogConent[logIndex]+"\n") + if err != nil { + t.Fatalf("write log failed: %v", err) + return + } + time.Sleep(time.Duration(interval * int(time.Millisecond))) + logIndex++ + if logIndex >= len(testLogConent) { + logIndex = 0 + } + } +} diff --git a/test/engine/trigger/trigger.go b/test/engine/trigger/trigger.go 
new file mode 100644 index 0000000000..9d2fd0f81c --- /dev/null +++ b/test/engine/trigger/trigger.go @@ -0,0 +1,67 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package trigger + +import ( + "context" + "html/template" + "strings" + "time" + + "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/setup" +) + +const triggerRegexTemplate = "cd {{.WorkDir}} && TOTAL_LOG={{.TotalLog}} INTERVAL={{.Interval}} FILENAME={{.Filename}} GENERATED_LOG_DIR={{.GeneratedLogDir}} {{.Command}}" + +func RegexSingle(ctx context.Context, totalLog int, path string, interval int) (context.Context, error) { + return generate(ctx, totalLog, path, interval, "TestGenerateRegexLogSingle") +} + +func RegexSingleGBK(ctx context.Context, totalLog int, path string, interval int) (context.Context, error) { + return generate(ctx, totalLog, path, interval, "TestGenerateRegexLogSingleGBK") +} + +func Apsara(ctx context.Context, totalLog int, path string, interval int) (context.Context, error) { + return generate(ctx, totalLog, path, interval, "TestGenerateApsara") +} + +func DelimiterSingle(ctx context.Context, totalLog int, path string, interval int) (context.Context, error) { + return generate(ctx, totalLog, path, interval, "TestGenerateDelimiterSingle") +} + +func generate(ctx context.Context, totalLog int, path string, interval int, commandName string) (context.Context, error) { + time.Sleep(3 * time.Second) + 
command := getRunTriggerCommand(commandName) + var triggerRegexCommand strings.Builder + template := template.Must(template.New("trigger").Parse(triggerRegexTemplate)) + splittedPath := strings.Split(path, "/") + dir := strings.Join(splittedPath[:len(splittedPath)-1], "/") + filename := splittedPath[len(splittedPath)-1] + if err := template.Execute(&triggerRegexCommand, map[string]interface{}{ + "WorkDir": config.TestConfig.WorkDir, + "TotalLog": totalLog, + "Interval": interval, + "GeneratedLogDir": dir, + "Filename": filename, + "Command": command, + }); err != nil { + return ctx, err + } + startTime := time.Now().Unix() + if err := setup.Env.ExecOnSource(ctx, triggerRegexCommand.String()); err != nil { + return ctx, err + } + return context.WithValue(ctx, config.StartTimeContextKey, int32(startTime)), nil +} diff --git a/test/engine/verify/apsara.go b/test/engine/verify/apsara.go new file mode 100644 index 0000000000..220cebd3bb --- /dev/null +++ b/test/engine/verify/apsara.go @@ -0,0 +1,94 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/avast/retry-go/v4" + + "github.com/alibaba/ilogtail/pkg/protocol" + "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/control" + "github.com/alibaba/ilogtail/test/engine/setup/subscriber" +) + +func Apsara(ctx context.Context) (context.Context, error) { + var from int32 + value := ctx.Value(config.StartTimeContextKey) + if value != nil { + from = value.(int32) + } else { + return ctx, fmt.Errorf("no start time") + } + fields := []string{"__FILE__", "__LEVEL__", "__LINE__", "__THREAD__", "file", "logNo", "mark", "microtime", "msg"} + timeoutCtx, cancel := context.WithTimeout(context.TODO(), config.TestConfig.RetryTimeout) + defer cancel() + var groups []*protocol.LogGroup + var err error + err = retry.Do( + func() error { + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) + if err != nil { + return err + } + for _, group := range groups { + for _, log := range group.Logs { + for _, field := range fields { + found := false + for _, content := range log.Contents { + if content.Key == field { + found = true + break + } + } + if !found { + return fmt.Errorf("field %s not found", field) + } + } + // validate time parse + var microtime int64 + var recordTime int64 + var nanoTime int64 + for _, content := range log.Contents { + if content.Key == "microtime" { + microtime, _ = strconv.ParseInt(content.Value, 10, 64) + } + if content.Key == "__time__" { + recordTime, _ = strconv.ParseInt(content.Value, 10, 64) + } + if content.Key == "__time_ns_part__" { + nanoTime, _ = strconv.ParseInt(content.Value, 10, 64) + } + } + if microtime != recordTime*1000000+nanoTime/1000 { + return fmt.Errorf("time parse error, microtime: %d, recordtime: %d, nanotime: %d", microtime, recordTime, nanoTime) + } + } + } + return err + }, + retry.Context(timeoutCtx), + retry.Delay(5*time.Second), + retry.DelayType(retry.FixedDelay), + ) + if err != nil 
{ + return ctx, err + } + return ctx, nil +} diff --git a/test/engine/verify/count.go b/test/engine/verify/count.go index 7a21cb1e21..ea5052d350 100644 --- a/test/engine/verify/count.go +++ b/test/engine/verify/count.go @@ -22,6 +22,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) @@ -41,7 +42,7 @@ func LogCount(ctx context.Context, expect int) (context.Context, error) { err = retry.Do( func() error { count = 0 - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) if err != nil { return err } @@ -49,7 +50,7 @@ func LogCount(ctx context.Context, expect int) (context.Context, error) { count += len(group.Logs) } if count != expect { - return fmt.Errorf("log count not match, expect %d, got %d", expect, count) + return fmt.Errorf("log count not match, expect %d, got %d, from %d", expect, count, from) } if expect == 0 { return fmt.Errorf("log count is 0") @@ -69,6 +70,45 @@ func LogCount(ctx context.Context, expect int) (context.Context, error) { return ctx, nil } +func MetricCount(ctx context.Context, expect int, duration int64) (context.Context, error) { + timeoutCtx, cancel := context.WithTimeout(context.TODO(), config.TestConfig.RetryTimeout) + defer cancel() + var groups []*protocol.LogGroup + var err error + var count int + err = retry.Do( + func() error { + count = 0 + currTime := time.Now().Unix() + lastScrapeTime := int32(currTime - duration) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), lastScrapeTime) + if err != nil { + return err + } + for _, group := range groups { + count += len(group.Logs) + } + if count < expect { + return fmt.Errorf("metric count not match, expect %d, got %d, from %d", expect, count, lastScrapeTime) + } + if expect == 0 { + return fmt.Errorf("metric count is 0") + } + 
return nil + }, + retry.Context(timeoutCtx), + retry.Delay(5*time.Second), + retry.DelayType(retry.FixedDelay), + ) + if expect == 0 && count == expect { + return ctx, nil + } + if err != nil { + return ctx, err + } + return ctx, nil +} + func LogCountAtLeast(ctx context.Context, expect int) (context.Context, error) { var from int32 value := ctx.Value(config.StartTimeContextKey) @@ -85,7 +125,7 @@ func LogCountAtLeast(ctx context.Context, expect int) (context.Context, error) { err = retry.Do( func() error { count = 0 - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) if err != nil { return err } @@ -113,7 +153,7 @@ func LogCountAtLeast(ctx context.Context, expect int) (context.Context, error) { return ctx, nil } -func LogCountAtLeastWithFilter(ctx context.Context, expect int, filterKey string, filterValue string) (context.Context, error) { +func LogCountAtLeastWithFilter(ctx context.Context, sql string, expect int, filterKey string, filterValue string) (context.Context, error) { var from int32 value := ctx.Value(config.StartTimeContextKey) if value != nil { @@ -129,7 +169,7 @@ func LogCountAtLeastWithFilter(ctx context.Context, expect int, filterKey string err = retry.Do( func() error { count = 0 - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) if err != nil { return err } diff --git a/test/engine/verify/log_context.go b/test/engine/verify/log_context.go index f10cd27b61..d763eccd16 100644 --- a/test/engine/verify/log_context.go +++ b/test/engine/verify/log_context.go @@ -24,6 +24,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) @@ -49,7 +50,7 @@ func LogContext(ctx context.Context) (context.Context, error) { var groups 
[]*protocol.LogGroup err = retry.Do( func() error { - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) return err }, retry.Context(timeoutCtx), diff --git a/test/engine/verify/log_field.go b/test/engine/verify/log_field.go index 6a23ebf2e0..01a766d1e2 100644 --- a/test/engine/verify/log_field.go +++ b/test/engine/verify/log_field.go @@ -25,6 +25,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) @@ -44,7 +45,7 @@ func LogField(ctx context.Context, expectFieldStr string) (context.Context, erro var groups []*protocol.LogGroup err = retry.Do( func() error { - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) return err }, retry.Context(timeoutCtx), @@ -68,7 +69,7 @@ func LogField(ctx context.Context, expectFieldStr string) (context.Context, erro goto find } } - return ctx, fmt.Errorf("want contains field %s, but not found", field) + return ctx, fmt.Errorf("want contains field %s, but not found, current: %s", field, log.Contents) find: } } @@ -92,7 +93,7 @@ func LogFieldKV(ctx context.Context, expectKeyValuesStr string) (context.Context var groups []*protocol.LogGroup err = retry.Do( func() error { - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) return err }, retry.Context(timeoutCtx), diff --git a/test/engine/verify/log_label.go b/test/engine/verify/log_label.go index 98f0b89a9d..0af6eee378 100644 --- a/test/engine/verify/log_label.go +++ b/test/engine/verify/log_label.go @@ -25,6 +25,7 @@ import ( "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/test/config" + 
"github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) @@ -44,7 +45,7 @@ func LogLabel(ctx context.Context, expectLabelsStr string) (context.Context, err var groups []*protocol.LogGroup err = retry.Do( func() error { - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) return err }, retry.Context(timeoutCtx), diff --git a/test/engine/verify/log_tag.go b/test/engine/verify/log_tag.go index 6dee0a779f..918aac59d8 100644 --- a/test/engine/verify/log_tag.go +++ b/test/engine/verify/log_tag.go @@ -24,6 +24,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) @@ -43,7 +44,7 @@ func TagKV(ctx context.Context, expectKeyValuesStr string) (context.Context, err var groups []*protocol.LogGroup err = retry.Do( func() error { - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) return err }, retry.Context(timeoutCtx), diff --git a/test/engine/verify/regex.go b/test/engine/verify/regex.go index 1ee6c42dbf..45da3962de 100644 --- a/test/engine/verify/regex.go +++ b/test/engine/verify/regex.go @@ -22,6 +22,7 @@ import ( "github.com/alibaba/ilogtail/pkg/protocol" "github.com/alibaba/ilogtail/test/config" + "github.com/alibaba/ilogtail/test/engine/control" "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) @@ -33,14 +34,14 @@ func RegexSingle(ctx context.Context) (context.Context, error) { } else { return ctx, fmt.Errorf("no start time") } - fields := []string{"mark", "file", "logno", "ip", "time", "method", "url", "http", "status", "size", "useragent", "msg"} + fields := []string{"mark", "file", "logNo", "ip", "time", "method", "url", "http", "status", "size", "userAgent", "msg"} timeoutCtx, cancel := 
context.WithTimeout(context.TODO(), config.TestConfig.RetryTimeout) defer cancel() var groups []*protocol.LogGroup var err error err = retry.Do( func() error { - groups, err = subscriber.TestSubscriber.GetData(from) + groups, err = subscriber.TestSubscriber.GetData(control.GetQuery(ctx), from) if err != nil { return err } diff --git a/test/go.mod b/test/go.mod index bde1615225..7535b25823 100644 --- a/test/go.mod +++ b/test/go.mod @@ -90,6 +90,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/onsi/gomega v1.19.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect diff --git a/test/go.sum b/test/go.sum index 99678b0499..34c917a9a0 100644 --- a/test/go.sum +++ b/test/go.sum @@ -822,6 +822,7 @@ github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+t github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=