This reverts commit 9ff8c77. (#508)
Signed-off-by: Khushboo Rajput <[email protected]>
(cherry picked from commit 75bbd37)
khushbr authored and github-actions[bot] committed Oct 6, 2023
1 parent 6a1ef31 commit d65669b
Showing 12 changed files with 1 addition and 1,374 deletions.
@@ -464,11 +464,6 @@ public class MetricsModel {
MetricUnits.MILLISECOND.toString(),
AllMetrics.ShardIndexingPressureDimension.values()));

// Search Back Pressure Metrics
allMetricsInitializer.put(
AllMetrics.SearchBackPressureStatsValue.SEARCHBP_SHARD_STATS_CANCELLATIONCOUNT
.toString(),
new MetricAttributes(MetricUnits.COUNT.toString(), EmptyDimension.values()));
ALL_METRICS = Collections.unmodifiableMap(allMetricsInitializer);
}
}
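For context, the deleted entry followed the same registration pattern as the other metrics in MetricsModel: each metric name maps to a MetricAttributes holding its unit and dimension set, and the initializer map is then frozen into the immutable ALL_METRICS view. A minimal sketch of that pattern, assuming the initializer is a plain HashMap built inside a static initializer (only the identifiers visible in this hunk are taken from the source):

    // Illustrative fragment of the MetricsModel registration pattern.
    static final Map<String, MetricAttributes> ALL_METRICS;

    static {
        Map<String, MetricAttributes> allMetricsInitializer = new HashMap<>();
        // The removed Search Back Pressure entry: a COUNT-valued stat with no extra dimensions.
        allMetricsInitializer.put(
                AllMetrics.SearchBackPressureStatsValue.SEARCHBP_SHARD_STATS_CANCELLATIONCOUNT
                        .toString(),
                new MetricAttributes(MetricUnits.COUNT.toString(), EmptyDimension.values()));
        // ... remaining metric registrations elided ...
        ALL_METRICS = Collections.unmodifiableMap(allMetricsInitializer);
    }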

This file was deleted.

This file was deleted.

@@ -135,24 +135,6 @@ public class ResourceUtil {
.setResourceEnum(ResourceEnum.SHARD_REQUEST_CACHE)
.setMetricEnum(MetricEnum.CACHE_MAX_SIZE)
.build();
/*
 * Search back pressure related resources.
 * SEARCHBACKPRESSURE_SHARD indicates that an unhealthy search back pressure resource unit was
 * caused by shard-level cancellation.
 */
public static final Resource SEARCHBACKPRESSURE_SHARD =
Resource.newBuilder()
.setResourceEnum(ResourceEnum.SEARCHBP)
.setMetricEnum(MetricEnum.SEARCHBP_SHARD)
.build();

/*
 * SEARCHBACKPRESSURE_TASK indicates that an unhealthy search back pressure resource unit was
 * caused by task-level cancellation.
 */
public static final Resource SEARCHBACKPRESSURE_TASK =
Resource.newBuilder()
.setResourceEnum(ResourceEnum.SEARCHBP)
.setMetricEnum(MetricEnum.SEARCHBP_TASK)
.build();
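The removed constants use a protobuf-style builder (Resource.newBuilder() ... build()). A minimal sketch of how such a constant could be built and then matched against an observed resource unit, assuming Resource is a generated protobuf message with value-based equals() (that assumption is not shown in this hunk):

    // Illustrative only: the builder pattern behind the deleted SEARCHBACKPRESSURE_* constants.
    Resource shardCancellationResource =
            Resource.newBuilder()
                    .setResourceEnum(ResourceEnum.SEARCHBP)
                    .setMetricEnum(MetricEnum.SEARCHBP_SHARD)
                    .build();

    // Hypothetical check: distinguish shard-level from task-level back pressure units.
    boolean isShardLevel = observedResource.equals(shardCancellationResource);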

/**
* Read the resourceType name from the ResourceType object
@@ -51,7 +51,6 @@
import org.opensearch.performanceanalyzer.rca.configs.HotShardRcaConfig;
import org.opensearch.performanceanalyzer.rca.configs.OldGenContendedRcaConfig;
import org.opensearch.performanceanalyzer.rca.configs.QueueRejectionRcaConfig;
import org.opensearch.performanceanalyzer.rca.configs.SearchBackPressureRcaConfig;
import org.opensearch.performanceanalyzer.rca.configs.ShardRequestCacheRcaConfig;
import org.opensearch.performanceanalyzer.rca.framework.api.summaries.bucket.BasicBucketCalculator;
import org.opensearch.performanceanalyzer.rca.framework.api.summaries.bucket.BucketCalculator;
@@ -233,10 +232,6 @@ public OldGenContendedRcaConfig getOldGenContendedRcaConfig() {
return new OldGenContendedRcaConfig(this);
}

public SearchBackPressureRcaConfig getSearchBackPressureRcaConfig() {
return new SearchBackPressureRcaConfig(this);
}

public <T> T readRcaConfig(
String rcaName, String key, T defaultValue, Class<? extends T> clazz) {
return readRcaConfig(rcaName, key, defaultValue, (s) -> true, clazz);
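The removed getter handed the whole conf object to SearchBackPressureRcaConfig, which could then pull typed settings through the generic readRcaConfig shown above. A small hypothetical usage of the four-argument overload (the RCA name, key, and default value are made up for illustration):

    // Hypothetical helper inside the same config class; names and default are illustrative.
    public int exampleThresholdPercent() {
        // Falls back to 10 when the key is absent from the RCA configuration.
        return readRcaConfig("example-rca", "example-threshold-percent", 10, Integer.class);
    }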
@@ -41,7 +41,6 @@
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.Heap_Max;
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.Heap_Used;
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.IndexWriter_Memory;
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.Searchbp_Stats;
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.ThreadPool_QueueCapacity;
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.ThreadPool_RejectedReqs;
import org.opensearch.performanceanalyzer.rca.framework.api.metrics.Thread_Blocked_Time;
@@ -86,8 +85,6 @@
import org.opensearch.performanceanalyzer.rca.store.rca.jvmsizing.LargeHeapClusterRca;
import org.opensearch.performanceanalyzer.rca.store.rca.jvmsizing.OldGenContendedRca;
import org.opensearch.performanceanalyzer.rca.store.rca.jvmsizing.OldGenReclamationRca;
import org.opensearch.performanceanalyzer.rca.store.rca.searchbackpressure.SearchBackPressureClusterRCA;
import org.opensearch.performanceanalyzer.rca.store.rca.searchbackpressure.SearchBackPressureRCA;
import org.opensearch.performanceanalyzer.rca.store.rca.temperature.NodeTemperatureRca;
import org.opensearch.performanceanalyzer.rca.store.rca.temperature.dimension.CpuUtilDimensionTemperatureRca;
import org.opensearch.performanceanalyzer.rca.store.rca.temperature.dimension.HeapAllocRateTemperatureRca;
@@ -120,9 +117,6 @@ public void construct() {
MetricsDB.AVG,
AllMetrics.CommonDimension.OPERATION.toString());

// SearchBackpressure Metric
Metric searchbp_Stats = new Searchbp_Stats(EVALUATION_INTERVAL_SECONDS);

heapUsed.addTag(
RcaConsts.RcaTagConstants.TAG_LOCUS,
RcaConsts.RcaTagConstants.LOCUS_DATA_CLUSTER_MANAGER_NODE);
@@ -147,9 +141,6 @@ public void construct() {
threadWaitedTime.addTag(
RcaConsts.RcaTagConstants.TAG_LOCUS,
RcaConsts.RcaTagConstants.LOCUS_DATA_CLUSTER_MANAGER_NODE);
searchbp_Stats.addTag(
RcaConsts.RcaTagConstants.TAG_LOCUS,
RcaConsts.RcaTagConstants.LOCUS_DATA_CLUSTER_MANAGER_NODE);

addLeaf(heapUsed);
addLeaf(gcEvent);
@@ -159,7 +150,6 @@ public void construct() {
addLeaf(cpuUtilizationGroupByOperation);
addLeaf(threadBlockedTime);
addLeaf(threadWaitedTime);
addLeaf(searchbp_Stats);

// add node stats metrics
List<Metric> nodeStatsMetrics = constructNodeStatsMetrics();
@@ -443,28 +433,6 @@ public void construct() {
shardRequestCacheClusterRca,
highHeapUsageClusterRca));

// Search Back Pressure Service RCA enabled
SearchBackPressureRCA searchBackPressureRCA =
new SearchBackPressureRCA(RCA_PERIOD, heapMax, heapUsed, searchbp_Stats);
searchBackPressureRCA.addTag(
RcaConsts.RcaTagConstants.TAG_LOCUS,
RcaConsts.RcaTagConstants.LOCUS_DATA_CLUSTER_MANAGER_NODE);
searchBackPressureRCA.addAllUpstreams(Arrays.asList(heapMax, heapUsed, searchbp_Stats));

// Search Back Pressure Service Cluster RCA enabled
SearchBackPressureClusterRCA searchBackPressureClusterRCA =
new SearchBackPressureClusterRCA(RCA_PERIOD, searchBackPressureRCA);
searchBackPressureClusterRCA.addTag(
RcaConsts.RcaTagConstants.TAG_LOCUS,
RcaConsts.RcaTagConstants.LOCUS_CLUSTER_MANAGER_NODE);
searchBackPressureClusterRCA.addAllUpstreams(
Collections.singletonList(searchBackPressureRCA));
searchBackPressureClusterRCA.addTag(
RcaConsts.RcaTagConstants.TAG_AGGREGATE_UPSTREAM,
RcaConsts.RcaTagConstants.LOCUS_DATA_NODE);

// TODO: Add SearchBackPressure Decider

AdmissionControlDecider admissionControlDecider =
buildAdmissionControlDecider(heapUsed, heapMax);

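Taken together, the lines deleted from construct() follow this class's standard graph-wiring pattern: create a metric node, pin it to a locus with addTag, register it with addLeaf, then connect an RCA node to its upstream metrics with addAllUpstreams and roll it up into a cluster-level RCA. A condensed sketch of that pattern (SomeNodeRca and SomeClusterRca are illustrative placeholders; the calls and tag constants are taken from the deleted lines):

    // Metric node: construct, tag with the locus it should run on, register as a graph leaf.
    Metric someStats = new Searchbp_Stats(EVALUATION_INTERVAL_SECONDS);
    someStats.addTag(
            RcaConsts.RcaTagConstants.TAG_LOCUS,
            RcaConsts.RcaTagConstants.LOCUS_DATA_CLUSTER_MANAGER_NODE);
    addLeaf(someStats);

    // Node-level RCA fed by the heap metrics plus the stats node above.
    SomeNodeRca nodeRca = new SomeNodeRca(RCA_PERIOD, heapMax, heapUsed, someStats);
    nodeRca.addTag(
            RcaConsts.RcaTagConstants.TAG_LOCUS,
            RcaConsts.RcaTagConstants.LOCUS_DATA_CLUSTER_MANAGER_NODE);
    nodeRca.addAllUpstreams(Arrays.asList(heapMax, heapUsed, someStats));

    // Cluster-level roll-up that aggregates node-level results from the data nodes.
    SomeClusterRca clusterRca = new SomeClusterRca(RCA_PERIOD, nodeRca);
    clusterRca.addTag(
            RcaConsts.RcaTagConstants.TAG_LOCUS,
            RcaConsts.RcaTagConstants.LOCUS_CLUSTER_MANAGER_NODE);
    clusterRca.addAllUpstreams(Collections.singletonList(nodeRca));
    clusterRca.addTag(
            RcaConsts.RcaTagConstants.TAG_AGGREGATE_UPSTREAM,
            RcaConsts.RcaTagConstants.LOCUS_DATA_NODE);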
@@ -222,7 +222,7 @@ protected boolean isOldGenCollectorCMS() {
return true;
}

-    /** Sliding window to check the minimal old gen usage within a given time frame */
+    /** Sliding window to check the minimal olg gen usage within a given time frame */
public static class MinOldGenSlidingWindow extends SlidingWindow<SlidingWindowData> {

public MinOldGenSlidingWindow(int SLIDING_WINDOW_SIZE_IN_TIMESTAMP, TimeUnit timeUnit) {
@@ -250,60 +250,4 @@ public double readMin() {
return Double.NaN;
}
}

/**
 * Sliding window to check the max/min old gen usage within a given time frame.
 *
 * <p>Provides a more general framework than MinOldGenSlidingWindow: depending on the
 * isMinSlidingWindow constructor flag, the window tracks either the minimum (true) or the
 * maximum (false) old gen usage.
 */
public static class MinMaxSlidingWindow extends SlidingWindow<SlidingWindowData> {
boolean isMinSlidingWindow;

public MinMaxSlidingWindow(
int SLIDING_WINDOW_SIZE_IN_TIMESTAMP,
TimeUnit timeUnit,
boolean isMinSlidingWindow) {
super(SLIDING_WINDOW_SIZE_IN_TIMESTAMP, timeUnit);
this.isMinSlidingWindow = isMinSlidingWindow;
}

@Override
public void next(SlidingWindowData e) {
if (isMinSlidingWindow) {
// monotonically decreasing sliding window
while (!windowDeque.isEmpty()
&& windowDeque.peekFirst().getValue() >= e.getValue()) {
windowDeque.pollFirst();
}
} else {
// monotonically increasing sliding window
while (!windowDeque.isEmpty()
&& windowDeque.peekFirst().getValue() < e.getValue()) {
windowDeque.pollFirst();
}
}

windowDeque.addFirst(e);
while (!windowDeque.isEmpty()
&& TimeUnit.MILLISECONDS.toSeconds(
e.getTimeStamp() - windowDeque.peekLast().getTimeStamp())
> SLIDING_WINDOW_SIZE) {
windowDeque.pollLast();
}
}

/*
 * Reads the last element in the window: the minimum element for a min sliding window,
 * otherwise the maximum element in the deque.
 */
public double readLastElementInWindow() {
if (!windowDeque.isEmpty()) {
return windowDeque.peekLast().getValue();
}
return Double.NaN;
}
}
}
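The deleted MinMaxSlidingWindow is a monotonic-deque sliding window: each new sample evicts the already-stored samples it dominates (larger-or-equal values for a min window, smaller values for a max window), and samples older than the window size are expired from the far end, so the oldest surviving entry is always the window's minimum (or maximum). The following standalone sketch illustrates the same technique for the min case; the class and method names are illustrative and not part of Performance Analyzer:

import java.util.ArrayDeque;
import java.util.Deque;

/** Standalone illustration of a min-over-time monotonic-deque window. */
final class MinOverTimeWindow {

    private static final class Sample {
        final long timestampMs;
        final double value;

        Sample(long timestampMs, double value) {
            this.timestampMs = timestampMs;
            this.value = value;
        }
    }

    private final Deque<Sample> deque = new ArrayDeque<>();
    private final long windowMs;

    MinOverTimeWindow(long windowMs) {
        this.windowMs = windowMs;
    }

    /** Adds a sample; timestamps are assumed to be non-decreasing. */
    void next(long timestampMs, double value) {
        // Evict recently added samples that the new (smaller-or-equal) value dominates:
        // they can never be the window minimum again.
        while (!deque.isEmpty() && deque.peekFirst().value >= value) {
            deque.pollFirst();
        }
        deque.addFirst(new Sample(timestampMs, value));
        // Expire samples that have fallen out of the time window (oldest live at the back).
        while (!deque.isEmpty() && timestampMs - deque.peekLast().timestampMs > windowMs) {
            deque.pollLast();
        }
    }

    /** The oldest surviving sample is the minimum of the current window. */
    double readMin() {
        return deque.isEmpty() ? Double.NaN : deque.peekLast().value;
    }
}

Feeding the values 5, 3, 4 within one window yields readMin() == 3 after the last sample; once the sample holding 3 ages out, the minimum becomes 4, mirroring what readLastElementInWindow() reported for a min-configured MinMaxSlidingWindow.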

This file was deleted.
