From c272d9f5a53fda09019523ca3e6fa8ed87b71446 Mon Sep 17 00:00:00 2001 From: Kaituo Li Date: Fri, 29 Mar 2024 17:44:28 -0700 Subject: [PATCH] Parameter tuning Signed-off-by: Kaituo Li --- .../opensearch/ad/caching/PriorityCache.java | 2 +- .../ad/task/ADTaskCacheManager.java | 11 +- .../opensearch/timeseries/MemoryTracker.java | 281 ++++++++++++------ .../settings/TimeSeriesSettings.java | 94 +++++- ...ndexAnomalyDetectorActionHandlerTests.java | 1 - ...TrackerTests.java => TRCFMemoryTests.java} | 100 +------ .../ad/caching/PriorityCacheTests.java | 4 +- .../forecast/CasterMemoryTests.java | 224 ++++++++++++++ .../timeseries/AbstractMemoryTrackerTest.java | 90 ++++++ 9 files changed, 619 insertions(+), 188 deletions(-) rename src/test/java/org/opensearch/ad/{MemoryTrackerTests.java => TRCFMemoryTests.java} (76%) create mode 100644 src/test/java/org/opensearch/forecast/CasterMemoryTests.java create mode 100644 src/test/java/org/opensearch/timeseries/AbstractMemoryTrackerTest.java diff --git a/src/main/java/org/opensearch/ad/caching/PriorityCache.java b/src/main/java/org/opensearch/ad/caching/PriorityCache.java index 40e28975d..810b5e62c 100644 --- a/src/main/java/org/opensearch/ad/caching/PriorityCache.java +++ b/src/main/java/org/opensearch/ad/caching/PriorityCache.java @@ -496,7 +496,7 @@ private long getRequiredMemory(AnomalyDetector detector, int numberOfEntity) { numberOfTrees, TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO, detector.getShingleSize().intValue(), - true + TimeSeriesSettings.NUM_SAMPLES_PER_TREE ); } diff --git a/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java b/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java index 014a9f798..8d98d3ae3 100644 --- a/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java +++ b/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java @@ -323,7 +323,7 @@ private long calculateADTaskCacheSize(ADTask adTask) { TimeSeriesSettings.NUM_TREES, TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, detector.getShingleSize().intValue(), - false + TimeSeriesSettings.NUM_SAMPLES_PER_TREE ) + shingleMemorySize(detector.getShingleSize(), detector.getEnabledFeatureIds().size()); } @@ -339,7 +339,14 @@ public long getModelSize(String taskId) { RandomCutForest rcfForest = tRCF.getForest(); int dimensions = rcfForest.getDimensions(); int numberOfTrees = rcfForest.getNumberOfTrees(); - return memoryTracker.estimateTRCFModelSize(dimensions, numberOfTrees, TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, 1, false); + return memoryTracker + .estimateTRCFModelSize( + dimensions, + numberOfTrees, + TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, + 1, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE + ); } /** diff --git a/src/main/java/org/opensearch/timeseries/MemoryTracker.java b/src/main/java/org/opensearch/timeseries/MemoryTracker.java index 1599960b3..dea956476 100644 --- a/src/main/java/org/opensearch/timeseries/MemoryTracker.java +++ b/src/main/java/org/opensearch/timeseries/MemoryTracker.java @@ -25,6 +25,7 @@ import org.opensearch.timeseries.common.exception.LimitExceededException; import com.amazon.randomcutforest.RandomCutForest; +import com.amazon.randomcutforest.parkservices.RCFCaster; import com.amazon.randomcutforest.parkservices.ThresholdedRandomCutForest; /** @@ -136,117 +137,211 @@ private void adjustOriginMemoryRelease(long memoryToConsume, Origin origin, Map< } /** - * Gets the estimated size of an entity's model. + * Gets the estimated size (bytes) of a TRCF model. 
 *
 * RCF size:
- * Assume the sample size is 256. I measured the memory size of a ThresholdedRandomCutForest
- * using heap dump. A ThresholdedRandomCutForest comprises a compact rcf model and
- * a threshold model.
+ * I measured the memory size of a ThresholdedRandomCutForest/RCFCaster using a heap dump.
+ * Internal shingling is required. A ThresholdedRandomCutForest/RCFCaster comprises an rcf
+ * model, a threshold model, and other fields.
+ *
+ * Symbols:
+ *  b   base dimension
+ *  s   shingle size
+ *  d   dimension = b * s
+ *  r   {@code point store constant = IF(s*0.05>1,s*0.05,IF(s>1,1.5,1))}
+ *  t   # trees
+ *  ss  sample size
+ *  br  bounding box ratio
+ *  c   max capacity of the point store = max(sampleSize * numberOfTrees + 1, 2 * sampleSize), defined in RandomCutForest
+ *  pt  {@code location list constant = 2 or 4: if shingleSize * capacity < Character.MAX_VALUE use PointStoreSmall (*2); otherwise use PointStoreLarge (*4)}
+ *  be  {@code br > 0 ? 1 : 0}
+ *  ci  number of internal nodes = ss - 1
+ *  nt  {@code node store type: ci<256&&d<=256 => NodeStoreSmall; ci<65535&&d<=65535 => NodeStoreMedium; otherwise => NodeStoreLarge}
+ *  ns  {@code node store size = IF(AND(ci<256,d<=256),10*ss + 208,IF(AND(ci<65535,d<=65535),16*ss + 202,20*ss + 198))} (direct translation of RCF logic)
 *
 * A compact RCF forest consists of:
 * - Random number generator: 56 bytes
 * - PointStoreCoordinator: 24 bytes
 * - SequentialForestUpdateExecutor: 24 bytes
 * - SequentialForestTraversalExecutor: 16 bytes
- * - PointStoreFloat
- * + IndexManager
- * - int array for free indexes: 256 * numberOfTrees * 4, where 4 is the size of an integer
- * - two int array for locationList and refCount: 256 * numberOfTrees * 4 bytes * 2
- * - a float array for data store: 256 * trees * dimension * 4 bytes: due to various
- * optimization like shingleSize(dimensions), we don't use all of the array. The average
- * usage percentage depends on shingle size and if internal shingling is enabled.
+ * + PointStoreFloat: ss*t*4*r*b + ss*t*pt + ss*t + 1776 + 48 + s*4
+ * - IndexManager + HashMap + shingleSize*4: 1776 + 48 + shingleSize*4 bytes
+ * - refCount: ss * trees * 1 (*1 since refCount is a byte[])
+ * - locationList: ss * trees * pt
+ * - a float array for the data store: ss * # trees * 4 bytes * point store constant * b,
+ * where ss * # trees is the maximum number of points allowed in the forest; * 4 since a float is 4 bytes.
+ * Since internal shingling is enabled, we don't use all of the array and multiply by
+ * a factor to account for the saved space.
+ *
+ * The average usage percentage depends on the shingle size and whether internal shingling is enabled.
 * I did experiments with power-of-two shingle sizes and internal shingling on/off
 * by running ThresholdedRandomCutForest over a million points.
 * My experiment shows that
- * * if internal shingling is off, data store is filled at full
+ * - if internal shingling is off, data store is filled at full
 * capacity.
- * * otherwise, data store usage depends on shingle size:
- *
- * Shingle Size usage
- * 1 1
- * 2 0.53
- * 4 0.27
- * 8 0.27
- * 16 0.13
- * 32 0.07
- * 64 0.07
+ * - otherwise, data store usage depends on shingle size:
+ * {@code IF(s*0.05>1,s*0.05,IF(s>1,1.5,1)) }
 *
- * The formula reflects the data and fits the point store usage to the closest power-of-two case.
- * For example, if shingle size is 17, we use the usage 0.13 since it is closer to 16.
- *
- * {@code IF(dimensions>=32, 1/(LOG(dimensions+1, 2)+LOG(dimensions+1, 10)), 1/LOG(dimensions+1, 2))}
- * where LOG gets the logarithm of a number and the syntax of LOG is {@code LOG (number, [base])}.
- * We derive the formula by observing the point store usage ratio is a decreasing function of dimensions
- * and the relationship is logarithm. Adding 1 to dimension to ensure dimension 1 results in a ratio 1.
- * - ComponentList: an array of size numberOfTrees
- * + SamplerPlusTree
- * - CompactSampler: 2248
- * + CompactRandomCutTreeFloat
- * - other fields: 152
- * - SmallNodeStore (small node store since our sample size is 256, less than the max of short): 6120
+ * - ComponentList: an array of size numberOfTrees = (2*(ss*4 + 16)+80+88 + (ns+128+(br*ss*d*8+16*be) + (br*ss*8+16*be)) + 24) * t
+ * + SamplerPlusTree = (2*(ss*4 + 16)+80+88 + (ns+128+(br*ss*d*8+16*be) + (br*ss*8+16*be)) + 24)
+ * - other: 24
+ * + CompactSampler: 2*(ss*4 + 16)+80+88
+ * - weight: sample size * 4 + 16
+ * - pointIndex: sample size * 4 + 16
+ * - evictedPoint: 48
+ * - random: 32
+ * + RandomCutTree: ns+128+(br*ss*d*8+16*be) + (br*ss*8+16*be)
+ * - other fields: 80
+ * - leafMass: 48
+ * + NodeStore (NodeStoreSmall case): (ss-1)*4+20+(ss-1)*2+18+(ss-1)*2+18+(ss-1)+17+(ss-1)+17+80+48
+ * - cutValue: (sample size-1)*4+20
+ * - freeNodeManager: 80
+ * The following fields are sized by node store type:
+ * NodeStoreSmall NodeStoreMedium NodeStoreLarge
+ * - leftIndex (sample size-1)*2+18 (sample size-1)*4+18 (sample size-1)*4+18
+ * - rightIndex (sample size-1)*2+18 (sample size-1)*4+18 (sample size-1)*4+18
+ * - cutDimension (sample size-1)+17 (sample size-1)*2+17 (sample size-1)*4+17
+ * - mass (sample size-1)+17 (sample size-1)*2+17 (sample size-1)*4+17
 * + BoxCacheFloat
 * - other: 104
- * - BoundingBoxFloat: (1040 + 255* ((dimension * 4 + 16) * 2 + 32)) * actual bounding box cache usage,
- * {@code actual bounding box cache usage = (bounding box cache fraction >= 0.3? 1: bounding box cache fraction)}
- * {@code >= 0.3} we will still initialize bounding box cache array of the max size.
- * 1040 is the size of BoundingBoxFloat's fields unrelated to tree size (255 nodes in our formula)
- * In total, RCF size is
- * 56 + # trees * (2248 + 152 + 6120 + 104 + (1040 + 255* (dimension * 4 + 16) * 2 + 32)) * adjusted bounding box cache ratio) +
- * (256 * # trees * 2 + 256 * # trees * dimension) * 4 bytes * point store ratio + 30744 * 2 + 15432 + 208) + 24 + 24 + 16
- * = 56 + # trees * (8624 + (1040 + 255 * (dimension * 8 + 64)) * actual bounding box cache usage) + 256 * # trees *
- * dimension * 4 * point store ratio + 77192
+ * - BoundingBoxFloat: {@code br * ss * d * 2 * 4 + (br > 0 ? 16 : 0) }
+ * - rangeSumData: {@code br * ss * 8 + (br > 0 ? 16 : 0) }
+ *
 *
 * Thresholder size
- * + Preprocessor:
- * - lastShingledInput and lastShingledPoint: 2*(dimension*8 + 16) (2 due to 2 double arrays, 16 are array object size)
- * - previousTimeStamps: shingle*8
- * - other: 248
- * - BasicThrehsolder: 256
- * + lastAnomalyAttribution:
- * - high and low: 2*(dimension*8 + 16)(2 due to 2 double arrays, 16 are array object)
- * - other 24
- * - lastAnomalyPoint and lastExpectedPoint: 2*(dimension*8 + 16)
- * - other like ThresholdedRandomCutForest object size: 96
- * In total, thresholder size is:
- * 6*(dimension*8 + 16) + shingle*8 + 248 + 256 + 24 + 96
- * = 6*(dimension*8 + 16) + shingle*8 + 624
+ * + Preprocessor: 280+d*8+16+24+280+72+3*(d*8 + 16)+16+128
+ * + transformer: 280+dimension*8+16+24 (24 is the object size)
+ * - deviations = 280
+ * - weights = dimension*8+16
+ * - timeStampDeviations = 280
+ * - dataQuality = 72
+ * - lastShingledInput, previousTimeStamps and lastShingledPoint = 3*(dimension*8 + 16) (3 due to 3 arrays of 8-byte elements, 16 is the array object size)
+ * - stopNormalization = 16
+ * - other: 128
+ * + PredictorCorrector: 472+4*(8*d+16)+184+(48*b+24)*2+32+96+104
+ * - thresholders: 472
+ * + lastDescriptor: 4*(8*dimension+16)+184
+ * - currentInput: 8*dimension+16 (16 is the array size)
+ * - RCFPoint: 8*dimension+16 (16 is the array size)
+ * - shift: 8*dimension+16 (16 is the array size)
+ * - scale: 8*dimension+16 (16 is the array size)
+ * - other: 184
+ * - deviationsActual: 48*base dimension+24 (24 is the object size)
+ * - deviationsExpected: 48*base dimension+24 (24 is the object size)
+ * - lastScore: 32
+ * - 4 ignore arrays: 96
+ * + lastAnomalyDescriptor: 24 + (b * 8+16)*7
+ * - attribution: 2 * (b * 8+16) + 24
+ * - high: base dimension * 8+16
+ * - low: base dimension * 8+16
+ * - currentInput: base dimension * 8+16
+ * - RCFPoint: d * 8+16
+ * - scale: base dimension * 8+16
+ * - shift: base dimension * 8+16
+ * - postShift: base dimension * 8+16
+ *
+ * Total: 152*b + 4*d*r*ss*t + 64*d + pt*ss*t + 4*s + ss*t + t*(32*be + 8*br*d*ss + 8*br*ss + ns + 8*ss + 352) + 3944
 *
 * @param dimension The number of feature dimensions in RCF
 * @param numberOfTrees The number of trees in RCF
 * @param boundingBoxCacheFraction Bounding box cache usage in RCF
 * @param shingleSize shingle size
- * @param internalShingling whether internal shingling is enabled or not
+ * @param sampleSize sample size
 * @return estimated TRCF model size
 *
- * @throws IllegalArgumentException when the input shingle size is out of range [1, 64]
+ * @throws IllegalArgumentException when the input shingle size is out of range [1, 128]
 */
- public long estimateTRCFModelSize(
- int dimension,
- int numberOfTrees,
- double boundingBoxCacheFraction,
- int shingleSize,
- boolean internalShingling
- ) {
- double averagePointStoreUsage = 0;
- if (!internalShingling || shingleSize == 1) {
- averagePointStoreUsage = 1;
- } else if (shingleSize <= 3) {
- averagePointStoreUsage = 0.53;
- } else if (shingleSize <= 12) {
- averagePointStoreUsage = 0.27;
- } else if (shingleSize <= 24) {
- averagePointStoreUsage = 0.13;
+ public long estimateTRCFModelSize(int dimension, int numberOfTrees, double boundingBoxCacheFraction, int shingleSize, int sampleSize) {
+ double baseDimension = dimension / shingleSize;
+ // rounding it up to the next power of two, in terms of selecting the pointStoreSizeConstant.
+ double pointStoreSizeConstant = 1;
+ if (shingleSize == 1) {
+ pointStoreSizeConstant = 1;
+ } else if (shingleSize == 2) {
+ pointStoreSizeConstant = 0.53;
+ } else if (shingleSize <= 4) {
+ pointStoreSizeConstant = 0.27;
+ } else if (shingleSize <= 8) {
+ pointStoreSizeConstant = 0.18;
+ } else if (shingleSize <= 16) {
+ pointStoreSizeConstant = 0.13;
+ } else if (shingleSize <= 32) {
+ pointStoreSizeConstant = 0.07;
- } else if (shingleSize <= 64) {
- averagePointStoreUsage = 0.07;
+ } else if (shingleSize <= 128) {
+ pointStoreSizeConstant = 0.05;
 } else {
 throw new IllegalArgumentException("out of range shingle size " + shingleSize);
 }
- double actualBoundingBoxUsage = boundingBoxCacheFraction >= 0.3 ? 1d : boundingBoxCacheFraction;
- long compactRcfSize = (long) (56 + numberOfTrees * (8624 + (1040 + 255 * (dimension * 8 + 64)) * actualBoundingBoxUsage) + 256
- * numberOfTrees * dimension * 4 * averagePointStoreUsage + 77192);
- long thresholdSize = 6 * (dimension * 8 + 16) + shingleSize * 8 + 624;
- return compactRcfSize + thresholdSize;
+ int pointStoreCapacity = Math.max(sampleSize * numberOfTrees + 1, 2 * sampleSize);
+ int pointStoreTypeConstant = shingleSize * pointStoreCapacity >= Character.MAX_VALUE ? 4 : 2;
+ int boundingBoxExistsConstant = boundingBoxCacheFraction > 0 ? 1 : 0;
+
+ int nodeStoreSize = 0;
+ int numberOfInternalNodes = sampleSize - 1;
+ if (numberOfInternalNodes < 256 && dimension <= 256) {
+ // NodeStoreSmall
+ nodeStoreSize = 10 * sampleSize + 208;
+ } else if (numberOfInternalNodes < 65535 && dimension <= 65535) {
+ // NodeStoreMedium
+ nodeStoreSize = 16 * sampleSize + 202;
+ } else {
+ // NodeStoreLarge
+ nodeStoreSize = 20 * sampleSize + 198;
+ }
+ return (long) (152 * baseDimension + 4 * dimension * pointStoreSizeConstant * sampleSize * numberOfTrees + 64 * dimension
+ + pointStoreTypeConstant * sampleSize * numberOfTrees + 4 * shingleSize + sampleSize * numberOfTrees + numberOfTrees * (32
+ * boundingBoxExistsConstant + 8 * boundingBoxCacheFraction * dimension * sampleSize + 8 * boundingBoxCacheFraction
+ * sampleSize + nodeStoreSize + 8 * sampleSize + 352) + 3944);
+ }
+
+ /**
+ * Gets the estimated size (bytes) of a RCFCaster model. On top of the TRCF model, RCFCaster adds an ErrorHandler.
+ *
+ * Symbols:
+ *  b  base dimension
+ *  h  horizon
+ *  l  forecast vector length = b * h (the length of one RangeVector)
+ *
+ * ErrorHandler size:
+ * - pastForecasts: h*(3*(l*4+16)+24), h RangeVectors; each RangeVector holds 3 float arrays
+ * of size l, 16 is the float array object size, 24 is the RangeVector object size
+ * - rmseLowDeviations: l * 48 + 784, l Deviations; each Deviation is of size 48, 784 is the Deviation array size
+ * - rmseHighDeviations: l * 48 + 784, similar to rmseLowDeviations
+ * - intervalPrecision: l * 48 + 784, similar to rmseLowDeviations
+ * - errorRMSE: 2*(l*8+14)+24, 2 double arrays of size l, plus 14 bytes per array object; 24 is the DiVector object size
+ * - errorDistribution: 3*(l*4+16)+24, similar to pastForecasts, with only 1 RangeVector
+ * - errorMean: 4*l+16, a float array of size l, plus the array object size 16
+ * - lastInputs: 8*2*b+16, a double array of size 2*b, plus the double array object size 16
+ * - lastDataDeviations: 4*b+16, a float array of size b, plus the array object size 16
+ * - upperLimit: 4*b+16, similar to lastDataDeviations
+ * - lowerLimit: 4*b+16, similar to lastDataDeviations
+ *
+ * Total: 176*b*h + 28*b + 12*h*(b*h + 6) + 2556
+ *
+ * @param dimension The number of feature dimensions in RCF
+ * @param numberOfTrees The number of trees in RCF
+ * @param boundingBoxCacheFraction Bounding box cache usage in RCF
+ * @param shingleSize shingle size
+ * @param sampleSize sample size
+ * @param horizon Forecast horizon
+ * @return estimated RCFCaster model size
+ */
+ public long estimateCasterModelSize(
+ int dimension,
+ int numberOfTrees,
+ double boundingBoxCacheFraction,
+ int shingleSize,
+ int sampleSize,
+ int horizon
+ ) {
+ long trcfModelSize = estimateTRCFModelSize(dimension, numberOfTrees, boundingBoxCacheFraction, shingleSize, sampleSize);
+ double baseDimension = dimension / shingleSize;
+ double errorHandlerSize = 176 * baseDimension * horizon + 28 * baseDimension + 12 * horizon * (baseDimension * horizon + 6) + 2556;
+ return (long) (trcfModelSize + errorHandlerSize);
+ }
 
 /**
@@ -290,7 +385,7 @@ public synchronized boolean syncMemoryState(Origin origin, long totalBytes, long
 .format(
 Locale.ROOT,
 "Memory states do not match. Recorded: total bytes %d, reserved bytes %d."
- + "Actual: total bytes %d, reserved bytes: %d",
+ + " Actual: total bytes %d, reserved bytes: %d",
 recordedTotalBytes,
 recordedReservedBytes,
 totalBytes,
@@ -348,7 +443,7 @@ public synchronized boolean isHostingAllowed(String configId, ThresholdedRandomC
 }
 
 /**
- * Gets the estimated size of an entity's model.
+ * Gets the estimated size (bytes) of a TRCF model.
 *
 * @param trcf ThresholdedRandomCutForest object
 * @return estimated model size in bytes
@@ -360,7 +455,25 @@ public long estimateTRCFModelSize(ThresholdedRandomCutForest trcf) {
 forest.getNumberOfTrees(),
 forest.getBoundingBoxCacheFraction(),
 forest.getShingleSize(),
- forest.isInternalShinglingEnabled()
+ forest.getSampleSize()
 );
 }
+
+ /**
+ * Gets the estimated size (bytes) of a RCFCaster model.
+ * + * @param caster RCFCaster object + * @return estimated model size in bytes + */ + public long estimateCasterModelSize(RCFCaster caster) { + RandomCutForest forest = caster.getForest(); + return estimateCasterModelSize( + forest.getDimensions(), + forest.getNumberOfTrees(), + forest.getBoundingBoxCacheFraction(), + forest.getShingleSize(), + forest.getSampleSize(), + caster.getForecastHorizon() ); } } diff --git a/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java b/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java index 56bbe187a..c4f3a5b39 100644 --- a/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java +++ b/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java @@ -19,17 +19,43 @@ public class TimeSeriesSettings { // max shingle size we have seen from external users // the larger shingle size, the harder to fill in a complete shingle - public static final int MAX_SHINGLE_SIZE = 60; + public static final int MAX_SHINGLE_SIZE = 64; + + // shingle size = seasonality / 2 + public static final int SEASONALITY_TO_SHINGLE_RATIO = 2; public static final String CONFIG_INDEX_MAPPING_FILE = "mappings/config.json"; public static final String JOBS_INDEX_MAPPING_FILE = "mappings/job.json"; - // 100,000 insertions costs roughly 1KB. + /** + * Memory Usage Estimation for a Map<String, Integer> with 100,000 entries: + * + * 1. HashMap Object Overhead: This can vary, but let's assume it's about 36 bytes. + * 2. Array Overhead: + * - The array size will be the nearest power of 2 greater than or equal to 100,000 / load factor. + * - Assuming a load factor of 0.75, the array size will be 2^17 = 131,072. + * - The memory usage will be 131,072 * 4 bytes = 524,288 bytes. + * 3. Entry Overhead: Each entry has an overhead of about 32 bytes (object header, hash code, and three references). + * 4. Key Overhead: + * - Each key has an overhead of about 36 bytes (object header, length, hash cache) plus the character data. + * - Assuming the character data is 64 bytes, the total key overhead per entry is 100 bytes. + * 5. Value Overhead: Each Integer object has an overhead of about 16 bytes (object header plus int value). + * + * Total Memory Usage Formula: + * Total Memory Usage = HashMap Object Overhead + Array Overhead + + * (Entry Overhead + Key Overhead + Value Overhead) * Number of Entries + * + * Plugging in the numbers: + * Total Memory Usage = 36 + 524,288 + (32 + 100 + 16) * 100,000 + * ≈ 14,965 kilobytes (≈ 15 MB) + * + * Note: + * This estimation is quite simplistic and the actual memory usage may be different based on the JVM implementation, + * the actual Map implementation being used, and other factors. + */ public static final int DOOR_KEEPER_FOR_COLD_STARTER_MAX_INSERTION = 100_000; - public static final double DOOR_KEEPER_FALSE_POSITIVE_RATE = 0.01; - // clean up door keeper every 60 intervals public static final int DOOR_KEEPER_MAINTENANCE_FREQ = 60; @@ -40,6 +66,10 @@ public class TimeSeriesSettings { // only has to do one update/scoring per interval public static final double REAL_TIME_BOUNDING_BOX_CACHE_RATIO = 0; + // max number of historical buckets for cold start. Corresponds to max buckets in OpenSearch. + // We send one query including one bucket per interval. So we don't want to surpass OS limit. 
+ public static final int MAX_HISTORY_INTERVALS = 10000; + // ====================================== // Historical analysis // ====================================== @@ -92,7 +122,7 @@ public class TimeSeriesSettings { public static int CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES = 200_000; /** - * ResultWriteRequest consists of index request (roughly 1KB), and QueuedRequest + * ADResultWriteRequest consists of index request (roughly 1KB), and QueuedRequest * fields (148 bytes, read comments of ENTITY_REQUEST_SIZE_CONSTANT). * Plus Java object size (12 bytes), we have roughly 1160 bytes per request * @@ -104,18 +134,18 @@ public class TimeSeriesSettings { public static int RESULT_WRITE_QUEUE_SIZE_IN_BYTES = 1160; /** - * FeatureRequest has entityName (# category fields * 256, the recommended limit - * of a keyword field length), model Id (roughly 256 bytes), and QueuedRequest - * fields including config Id(roughly 128 bytes), dataStartTimeMillis (long, + * FeatureRequest has entity (max 2 category fields * 256, the recommended limit + * of a keyword field length, 512 bytes), model Id (roughly 256 bytes), runOnce + * boolean (roughly 8 bytes), dataStartTimeMillis (long, * 8 bytes), and currentFeature (16 bytes, assume two features on average). - * Plus Java object size (12 bytes), we have roughly 932 bytes per request + * Plus Java object size (12 bytes), we have roughly 812 bytes per request * assuming we have 2 categorical fields (plan to support 2 categorical fields now). * We don't want the total size exceeds 0.1% of the heap. - * We can have at most 0.1% heap / 932 = heap / 932,000. + * We can have at most 0.1% heap / 812 = heap / 812,000. * For t3.small, 0.1% heap is of 1MB. The queue's size is up to - * 10^ 6 / 932 = 1072 + * 10^ 6 / 812 = 1231 */ - public static int FEATURE_REQUEST_SIZE_IN_BYTES = 932; + public static int FEATURE_REQUEST_SIZE_IN_BYTES = 812; /** * CheckpointMaintainRequest has model Id (roughly 256 bytes), and QueuedRequest @@ -146,9 +176,9 @@ public class TimeSeriesSettings { // RCF public static final int NUM_SAMPLES_PER_TREE = 256; - public static final int NUM_TREES = 30; + public static final int NUM_TREES = 50; - public static final double TIME_DECAY = 0.0001; + public static final int DEFAULT_RECENCY_EMPHASIS = 10 * NUM_SAMPLES_PER_TREE; // If we have 32 + shingleSize (hopefully recent) values, RCF can get up and running. It will be noisy — // there is a reason that default size is 256 (+ shingle size), but it may be more useful for people to @@ -158,6 +188,11 @@ public class TimeSeriesSettings { // for a batch operation, we want all of the bounding box in-place for speed public static final double BATCH_BOUNDING_BOX_CACHE_RATIO = 1; + // feature processing + public static final int TRAIN_SAMPLE_TIME_RANGE_IN_HOURS = 24; + + public static final int MIN_TRAIN_SAMPLES = 512; + // ====================================== // Cold start setting // ====================================== @@ -209,4 +244,35 @@ public class TimeSeriesSettings { // such as "there are at least 10000 entities", the default is set to 10,000. That is, requests will count the // total entities up to 10,000. 
public static final int MAX_TOTAL_ENTITIES_TO_TRACK = 10_000; + + // ====================================== + // Validate Detector API setting + // ====================================== + public static final long TOP_VALIDATE_TIMEOUT_IN_MILLIS = 10_000; + + public static final double INTERVAL_BUCKET_MINIMUM_SUCCESS_RATE = 0.75; + + public static final double INTERVAL_RECOMMENDATION_INCREASING_MULTIPLIER = 1.2; + + public static final long MAX_INTERVAL_REC_LENGTH_IN_MINUTES = 60L; + + public static final int MAX_DESCRIPTION_LENGTH = 1000; + + // ====================================== + // Cache setting + // ====================================== + // We don't want to retry cold start once it exceeds the threshold. + // It is larger than 1 since cx may have ingested new data or the + // system is unstable + public static final int COLD_START_DOOR_KEEPER_COUNT_THRESHOLD = 3; + + // we don't admit model to cache before it exceeds the threshold + public static final int CACHE_DOOR_KEEPER_COUNT_THRESHOLD = 1; + + // max entities to track per detector + public static final int MAX_TRACKING_ENTITIES = 1000000; + + public static final double DOOR_KEEPER_FALSE_POSITIVE_RATE = 0.01; + + public static final double TIME_DECAY = 0.0001; } diff --git a/src/test/java/org/opensearch/action/admin/indices/mapping/get/IndexAnomalyDetectorActionHandlerTests.java b/src/test/java/org/opensearch/action/admin/indices/mapping/get/IndexAnomalyDetectorActionHandlerTests.java index aa2f30b02..9d0ec4030 100644 --- a/src/test/java/org/opensearch/action/admin/indices/mapping/get/IndexAnomalyDetectorActionHandlerTests.java +++ b/src/test/java/org/opensearch/action/admin/indices/mapping/get/IndexAnomalyDetectorActionHandlerTests.java @@ -188,7 +188,6 @@ public void testThreeCategoricalFields() throws IOException { ); } - @SuppressWarnings("unchecked") public void testMoreThanTenThousandSingleEntityDetectors() throws IOException { SearchResponse mockResponse = mock(SearchResponse.class); int totalHits = 1001; diff --git a/src/test/java/org/opensearch/ad/MemoryTrackerTests.java b/src/test/java/org/opensearch/ad/TRCFMemoryTests.java similarity index 76% rename from src/test/java/org/opensearch/ad/MemoryTrackerTests.java rename to src/test/java/org/opensearch/ad/TRCFMemoryTests.java index 631240072..e39d846a8 100644 --- a/src/test/java/org/opensearch/ad/MemoryTrackerTests.java +++ b/src/test/java/org/opensearch/ad/TRCFMemoryTests.java @@ -14,22 +14,12 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import org.opensearch.ad.model.AnomalyDetector; -import org.opensearch.ad.settings.AnomalyDetectorSettings; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; -import org.opensearch.monitor.jvm.JvmInfo; -import org.opensearch.monitor.jvm.JvmInfo.Mem; -import org.opensearch.monitor.jvm.JvmService; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.timeseries.AbstractMemoryTrackerTest; import org.opensearch.timeseries.MemoryTracker; -import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.settings.TimeSeriesSettings; @@ -37,65 +27,16 @@ import com.amazon.randomcutforest.config.TransformMethod; import 
com.amazon.randomcutforest.parkservices.ThresholdedRandomCutForest; -public class MemoryTrackerTests extends OpenSearchTestCase { - - int inputFeatures; - int rcfSampleSize; - int numberOfTrees; - double rcfTimeDecay; - int numMinSamples; - int shingleSize; - int dimension; - MemoryTracker tracker; +public class TRCFMemoryTests extends AbstractMemoryTrackerTest { long expectedRCFModelSize; - String detectorId; - long largeHeapSize; - long smallHeapSize; - Mem mem; ThresholdedRandomCutForest trcf; - float modelMaxPercen; - ClusterService clusterService; - double modelMaxSizePercentage; - double modelDesiredSizePercentage; - JvmService jvmService; AnomalyDetector detector; - CircuitBreakerService circuitBreaker; @Override public void setUp() throws Exception { super.setUp(); - inputFeatures = 1; - rcfSampleSize = 256; - numberOfTrees = 30; - rcfTimeDecay = 0.2; - numMinSamples = 128; - shingleSize = 8; - dimension = inputFeatures * shingleSize; - - jvmService = mock(JvmService.class); - JvmInfo info = mock(JvmInfo.class); - mem = mock(Mem.class); - // 800 MB is the limit - largeHeapSize = 800_000_000; - smallHeapSize = 1_000_000; - - when(jvmService.info()).thenReturn(info); - when(info.getMem()).thenReturn(mem); - - modelMaxSizePercentage = 0.1; - modelDesiredSizePercentage = 0.0002; - - clusterService = mock(ClusterService.class); - modelMaxPercen = 0.1f; - Settings settings = Settings.builder().put(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.getKey(), modelMaxPercen).build(); - ClusterSettings clusterSettings = new ClusterSettings( - settings, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE))) - ); - when(clusterService.getClusterSettings()).thenReturn(clusterSettings); expectedRCFModelSize = 382784; - detectorId = "123"; trcf = ThresholdedRandomCutForest .builder() @@ -119,15 +60,6 @@ public void setUp() throws Exception { detector = mock(AnomalyDetector.class); when(detector.getEnabledFeatureIds()).thenReturn(Collections.singletonList("a")); when(detector.getShingleSize()).thenReturn(1); - - circuitBreaker = mock(CircuitBreakerService.class); - when(circuitBreaker.isOpen()).thenReturn(false); - } - - private void setUpBigHeap() { - ByteSizeValue value = new ByteSizeValue(largeHeapSize); - when(mem.getHeapMax()).thenReturn(value); - tracker = new MemoryTracker(jvmService, modelMaxSizePercentage, clusterService, circuitBreaker); } private void setUpSmallHeap() { @@ -139,8 +71,8 @@ private void setUpSmallHeap() { public void testEstimateModelSize() { setUpBigHeap(); - assertEquals(403491, tracker.estimateTRCFModelSize(trcf)); - assertTrue(tracker.isHostingAllowed(detectorId, trcf)); + assertEquals(400768, tracker.estimateTRCFModelSize(trcf)); + assertTrue(tracker.isHostingAllowed(configId, trcf)); ThresholdedRandomCutForest rcf2 = ThresholdedRandomCutForest .builder() @@ -161,8 +93,8 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(603708, tracker.estimateTRCFModelSize(rcf2)); - assertTrue(tracker.isHostingAllowed(detectorId, rcf2)); + assertEquals(623944, tracker.estimateTRCFModelSize(rcf2)); + assertTrue(tracker.isHostingAllowed(configId, rcf2)); ThresholdedRandomCutForest rcf3 = ThresholdedRandomCutForest .builder() @@ -183,7 +115,7 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(1685208, tracker.estimateTRCFModelSize(rcf3)); + assertEquals(1789092, tracker.estimateTRCFModelSize(rcf3)); ThresholdedRandomCutForest 
rcf4 = ThresholdedRandomCutForest .builder() @@ -203,7 +135,7 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(521304, tracker.estimateTRCFModelSize(rcf4)); + assertEquals(609244, tracker.estimateTRCFModelSize(rcf4)); ThresholdedRandomCutForest rcf5 = ThresholdedRandomCutForest .builder() @@ -223,7 +155,7 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(467340, tracker.estimateTRCFModelSize(rcf5)); + assertEquals(518960, tracker.estimateTRCFModelSize(rcf5)); ThresholdedRandomCutForest rcf6 = ThresholdedRandomCutForest .builder() @@ -243,7 +175,7 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(603676, tracker.estimateTRCFModelSize(rcf6)); + assertEquals(746392, tracker.estimateTRCFModelSize(rcf6)); ThresholdedRandomCutForest rcf7 = ThresholdedRandomCutForest .builder() @@ -263,7 +195,7 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(401481, tracker.estimateTRCFModelSize(rcf7)); + assertEquals(434080, tracker.estimateTRCFModelSize(rcf7)); ThresholdedRandomCutForest rcf8 = ThresholdedRandomCutForest .builder() @@ -283,7 +215,7 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(1040432, tracker.estimateTRCFModelSize(rcf8)); + assertEquals(1571852, tracker.estimateTRCFModelSize(rcf8)); ThresholdedRandomCutForest rcf9 = ThresholdedRandomCutForest .builder() @@ -303,11 +235,11 @@ public void testEstimateModelSize() { .alertOnce(true) .autoAdjust(true) .build(); - assertEquals(1040688, tracker.estimateTRCFModelSize(rcf9)); + assertEquals(1243540, tracker.estimateTRCFModelSize(rcf9)); ThresholdedRandomCutForest rcf10 = ThresholdedRandomCutForest .builder() - .dimensions(325) + .dimensions(387) .sampleSize(rcfSampleSize) .numberOfTrees(numberOfTrees) .timeDecay(rcfTimeDecay) @@ -318,7 +250,7 @@ public void testEstimateModelSize() { .boundingBoxCacheFraction(TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO) .internalShinglingEnabled(true) // same with dimension for opportunistic memory saving - .shingleSize(65) + .shingleSize(129) .transformMethod(TransformMethod.NORMALIZE) .alertOnce(true) .autoAdjust(true) @@ -342,7 +274,7 @@ public void testCanAllocate() { public void testCannotHost() { setUpSmallHeap(); - expectThrows(LimitExceededException.class, () -> tracker.isHostingAllowed(detectorId, trcf)); + expectThrows(LimitExceededException.class, () -> tracker.isHostingAllowed(configId, trcf)); } public void testMemoryToShed() { diff --git a/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java b/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java index 4154687cf..5b209fffc 100644 --- a/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java +++ b/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java @@ -137,7 +137,7 @@ public void setUp() throws Exception { cacheProvider.set(cache); entityCache = cacheProvider.get(); - when(memoryTracker.estimateTRCFModelSize(anyInt(), anyInt(), anyDouble(), anyInt(), anyBoolean())).thenReturn(memoryPerEntity); + when(memoryTracker.estimateTRCFModelSize(anyInt(), anyInt(), anyDouble(), anyInt(), anyInt())).thenReturn(memoryPerEntity); when(memoryTracker.canAllocateReserved(anyLong())).thenReturn(true); detector2 = mock(AnomalyDetector.class); @@ -213,7 +213,7 @@ public void testCacheHit() { ArgumentCaptor origin = ArgumentCaptor.forClass(MemoryTracker.Origin.class); 
// input dimension: 3, shingle: 4 - long expectedMemoryPerEntity = 436828L; + long expectedMemoryPerEntity = 467872L; verify(memoryTracker, times(1)).consumeMemory(memoryConsumed.capture(), reserved.capture(), origin.capture()); assertEquals(dedicatedCacheSize * expectedMemoryPerEntity, memoryConsumed.getValue().intValue()); assertEquals(true, reserved.getValue().booleanValue()); diff --git a/src/test/java/org/opensearch/forecast/CasterMemoryTests.java b/src/test/java/org/opensearch/forecast/CasterMemoryTests.java new file mode 100644 index 000000000..dfe818cc1 --- /dev/null +++ b/src/test/java/org/opensearch/forecast/CasterMemoryTests.java @@ -0,0 +1,224 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.forecast; + +import org.opensearch.timeseries.AbstractMemoryTrackerTest; +import org.opensearch.timeseries.settings.TimeSeriesSettings; + +import com.amazon.randomcutforest.config.ForestMode; +import com.amazon.randomcutforest.config.Precision; +import com.amazon.randomcutforest.config.TransformMethod; +import com.amazon.randomcutforest.parkservices.RCFCaster; + +public class CasterMemoryTests extends AbstractMemoryTrackerTest { + RCFCaster caster; + int forecastHorizon; + + @Override + public void setUp() throws Exception { + super.setUp(); + forecastHorizon = shingleSize * 3; + caster = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO) + .shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + .forecastHorizon(forecastHorizon) + .forestMode(ForestMode.STANDARD) + .build(); + } + + public void testEstimateModelSize() { + setUpBigHeap(); + + assertEquals(416216, tracker.estimateCasterModelSize(caster)); + assertTrue(tracker.isHostingAllowed(configId, caster)); + + inputFeatures = 2; + shingleSize = 4; + dimension = inputFeatures * shingleSize; + forecastHorizon = 3 * shingleSize; + RCFCaster caster2 = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO) + .shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + .forecastHorizon(forecastHorizon) + .forestMode(ForestMode.STANDARD) + .build(); + assertEquals(423324, tracker.estimateCasterModelSize(caster2)); + assertTrue(tracker.isHostingAllowed(configId, caster2)); + + inputFeatures = 2; + shingleSize = 32; + dimension = inputFeatures * shingleSize; + forecastHorizon = 3 * shingleSize; + RCFCaster caster3 = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO) + 
.shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + .forecastHorizon(forecastHorizon) + .forestMode(ForestMode.STANDARD) + .build(); + assertEquals(824748, tracker.estimateCasterModelSize(caster3)); + assertTrue(tracker.isHostingAllowed(configId, caster3)); + + inputFeatures = 2; + shingleSize = 16; + dimension = inputFeatures * shingleSize; + forecastHorizon = 3 * shingleSize; + RCFCaster caster4 = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + .forecastHorizon(forecastHorizon) + .forestMode(ForestMode.STANDARD) + .build(); + assertEquals(4000812, tracker.estimateCasterModelSize(caster4)); + assertTrue(tracker.isHostingAllowed(configId, caster4)); + + inputFeatures = 8; + shingleSize = 4; + dimension = inputFeatures * shingleSize; + forecastHorizon = 3 * shingleSize; + RCFCaster caster5 = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + .forecastHorizon(forecastHorizon) + .forestMode(ForestMode.STANDARD) + .build(); + assertEquals(4161556, tracker.estimateCasterModelSize(caster5)); + assertTrue(tracker.isHostingAllowed(configId, caster5)); + + inputFeatures = 8; + shingleSize = 8; + dimension = inputFeatures * shingleSize; + forecastHorizon = 3 * shingleSize; + RCFCaster caster6 = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + .forecastHorizon(forecastHorizon) + .forestMode(ForestMode.STANDARD) + .build(); + assertEquals(7672708, tracker.estimateCasterModelSize(caster6)); + assertTrue(tracker.isHostingAllowed(configId, caster6)); + + inputFeatures = 8; + shingleSize = 16; + dimension = inputFeatures * shingleSize; + forecastHorizon = 3 * shingleSize; + RCFCaster caster7 = RCFCaster + .builder() + .dimensions(dimension) + .sampleSize(rcfSampleSize) + .numberOfTrees(numberOfTrees) + .timeDecay(rcfTimeDecay) + .outputAfter(numMinSamples) + .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize) + .parallelExecutionEnabled(false) + .compact(true) + .precision(Precision.FLOAT_32) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .shingleSize(shingleSize) + .internalShinglingEnabled(true) + .transformMethod(TransformMethod.NORMALIZE) + 
.forecastHorizon(forecastHorizon)
+ .forestMode(ForestMode.STANDARD)
+ .build();
+ assertEquals(14693988, tracker.estimateCasterModelSize(caster7));
+ assertTrue(tracker.isHostingAllowed(configId, caster7));
+
+ inputFeatures = 8;
+ shingleSize = 129;
+ dimension = inputFeatures * shingleSize;
+ forecastHorizon = 3 * shingleSize;
+ RCFCaster caster8 = RCFCaster
+ .builder()
+ .dimensions(dimension)
+ .sampleSize(rcfSampleSize)
+ .numberOfTrees(numberOfTrees)
+ .timeDecay(rcfTimeDecay)
+ .outputAfter(numMinSamples)
+ .initialAcceptFraction(numMinSamples * 1.0d / rcfSampleSize)
+ .parallelExecutionEnabled(false)
+ .compact(true)
+ .precision(Precision.FLOAT_32)
+ .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO)
+ .shingleSize(129)
+ .internalShinglingEnabled(true)
+ .transformMethod(TransformMethod.NORMALIZE)
+ .forecastHorizon(forecastHorizon)
+ .forestMode(ForestMode.STANDARD)
+ .build();
+ expectThrows(IllegalArgumentException.class, () -> tracker.estimateTRCFModelSize(caster8));
+ }
+}
diff --git a/src/test/java/org/opensearch/timeseries/AbstractMemoryTrackerTest.java b/src/test/java/org/opensearch/timeseries/AbstractMemoryTrackerTest.java
new file mode 100644
index 000000000..e58bfa650
--- /dev/null
+++ b/src/test/java/org/opensearch/timeseries/AbstractMemoryTrackerTest.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.timeseries;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
+import org.opensearch.ad.settings.AnomalyDetectorSettings;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.monitor.jvm.JvmInfo;
+import org.opensearch.monitor.jvm.JvmInfo.Mem;
+import org.opensearch.monitor.jvm.JvmService;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.timeseries.breaker.CircuitBreakerService;
+
+public abstract class AbstractMemoryTrackerTest extends OpenSearchTestCase {
+ protected long largeHeapSize;
+ protected long smallHeapSize;
+ protected Mem mem;
+ protected MemoryTracker tracker;
+ protected JvmService jvmService;
+ protected double modelMaxSizePercentage;
+ protected double modelDesiredSizePercentage;
+ protected ClusterService clusterService;
+ protected float modelMaxPercen;
+ protected CircuitBreakerService circuitBreaker;
+ protected int inputFeatures;
+ protected int rcfSampleSize;
+ protected int numberOfTrees;
+ protected double rcfTimeDecay;
+ protected int numMinSamples;
+ protected int shingleSize;
+ protected int dimension;
+ protected String configId;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ mem = mock(Mem.class);
+ // 800 MB is the limit
+ largeHeapSize = 800_000_000;
+ smallHeapSize = 1_000_000;
+
+ jvmService = mock(JvmService.class);
+ JvmInfo info = mock(JvmInfo.class);
+
+ when(jvmService.info()).thenReturn(info);
+ when(info.getMem()).thenReturn(mem);
+
+ modelMaxSizePercentage = 0.1;
+ modelDesiredSizePercentage = 0.0002;
+
+ clusterService = mock(ClusterService.class);
+ // assign before the value is read into the Settings object below
+ modelMaxPercen = 0.1f;
+ Settings settings = Settings.builder().put(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.getKey(), modelMaxPercen).build();
+ ClusterSettings clusterSettings = new ClusterSettings(
+ settings,
+ Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE)))
+ );
+ when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
+
+ circuitBreaker = mock(CircuitBreakerService.class);
+ when(circuitBreaker.isOpen()).thenReturn(false);
+
+ inputFeatures = 1;
+ rcfSampleSize = 256;
+ numberOfTrees = 50;
+ rcfTimeDecay = 0.2;
+ numMinSamples = 128;
+ shingleSize = 8;
+ dimension = inputFeatures * shingleSize;
+ configId = "123";
+ }
+
+ protected void setUpBigHeap() {
+ ByteSizeValue value = new ByteSizeValue(largeHeapSize);
+ when(mem.getHeapMax()).thenReturn(value);
+ tracker = new MemoryTracker(jvmService, modelMaxSizePercentage, clusterService, circuitBreaker);
+ }
+}
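
Appendix (illustrative sketch, not part of the patch): the size formula in this patch can be sanity-checked standalone. The snippet below mirrors the arithmetic of MemoryTracker#estimateTRCFModelSize and the ErrorHandler term of estimateCasterModelSize exactly as added above; the class name ModelSizeCheck is hypothetical, the constants are copied from the patch, and the expected outputs are the first assertions in TRCFMemoryTests#testEstimateModelSize (400768) and CasterMemoryTests#testEstimateModelSize (416216).

// Standalone mirror of the estimators; all constants copied from the patch above.
public final class ModelSizeCheck {
    // Measured point store usage per power-of-two shingle size (see the Javadoc in this patch).
    static double pointStoreSizeConstant(int shingleSize) {
        if (shingleSize == 1) return 1;
        if (shingleSize == 2) return 0.53;
        if (shingleSize <= 4) return 0.27;
        if (shingleSize <= 8) return 0.18;
        if (shingleSize <= 16) return 0.13;
        if (shingleSize <= 32) return 0.07;
        if (shingleSize <= 128) return 0.05;
        throw new IllegalArgumentException("out of range shingle size " + shingleSize);
    }

    static long trcfSize(int dimension, int trees, double bbcf, int shingleSize, int sampleSize) {
        double baseDimension = dimension / shingleSize;
        double psc = pointStoreSizeConstant(shingleSize);
        int capacity = Math.max(sampleSize * trees + 1, 2 * sampleSize);
        int pt = shingleSize * capacity >= Character.MAX_VALUE ? 4 : 2; // PointStoreLarge vs PointStoreSmall
        int be = bbcf > 0 ? 1 : 0;                                      // bounding box cache exists
        int internalNodes = sampleSize - 1;
        int ns = internalNodes < 256 && dimension <= 256 ? 10 * sampleSize + 208      // NodeStoreSmall
            : internalNodes < 65535 && dimension <= 65535 ? 16 * sampleSize + 202     // NodeStoreMedium
            : 20 * sampleSize + 198;                                                  // NodeStoreLarge
        return (long) (152 * baseDimension + 4 * dimension * psc * sampleSize * trees + 64 * dimension
            + pt * sampleSize * trees + 4 * shingleSize + sampleSize * trees
            + trees * (32 * be + 8 * bbcf * dimension * sampleSize + 8 * bbcf * sampleSize + ns + 8 * sampleSize + 352)
            + 3944);
    }

    public static void main(String[] args) {
        // 1 feature, shingle 8 => dimension 8; 50 trees; sample size 256; bounding box cache ratio 0.
        long trcf = trcfSize(8, 50, 0, 8, 256);
        System.out.println(trcf); // 400768, the first assertion in TRCFMemoryTests
        // RCFCaster adds an ErrorHandler: 176*b*h + 28*b + 12*h*(b*h + 6) + 2556, with b = 1, h = 24.
        double b = 1, h = 24;
        System.out.println(trcf + (long) (176 * b * h + 28 * b + 12 * h * (b * h + 6) + 2556)); // 416216, CasterMemoryTests
    }
}

Running main prints 400768 and 416216, matching the updated test expectations; any drift between this sketch and MemoryTracker would indicate the Javadoc totals and the code have diverged.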