Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
## [Unreleased 3.x]
### Added
- Add temporal routing processors for time-based document routing ([#18920](https://github.com/opensearch-project/OpenSearch/issues/18920))
- Added support for field-level stats on segments ([#18973](https://github.com/opensearch-project/OpenSearch/pull/18973))

### Changed
- Add CompletionStage variants to methods in the Client Interface and default to ActionListener impl ([#18998](https://github.com/opensearch-project/OpenSearch/pull/18998))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,11 @@
"description":"Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)",
"default":false
},
"include_field_level_segment_file_sizes":{
"type":"boolean",
"description":"Whether to report the disk usage of Lucene index files at the field level (only applies if segment stats are requested)",
"default":false
},
"include_unloaded_segments":{
"type":"boolean",
"description":"If set to true segment stats will include stats for segments that are not currently loaded into memory",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,11 @@
"type":"boolean",
"description":"Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)",
"default":false
},
"include_field_level_segment_file_sizes":{
"type":"boolean",
"description":"Whether to report the disk usage of Lucene index files at the field level (only applies if segment stats are requested)",
"default":false
}
}
}
Expand Down
5 changes: 4 additions & 1 deletion server/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -395,17 +395,20 @@ tasks.named("sourcesJar").configure {
tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) {
logger.info("Comparing public APIs from ${version} to ${japicmpCompareTarget}")
// See please https://github.com/siom79/japicmp/issues/201
compatibilityChangeExcludes = [ "METHOD_ABSTRACT_NOW_DEFAULT", "METHOD_ADDED_TO_INTERFACE" ]
compatibilityChangeExcludes = [ "METHOD_ABSTRACT_NOW_DEFAULT", "METHOD_ADDED_TO_INTERFACE", "METHOD_REMOVED_IN_SUPERCLASS" ]
oldClasspath.from(files("${buildDir}/japicmp-target/opensearch-${japicmpCompareTarget}.jar"))
newClasspath.from(tasks.named('jar'))
// Restrict to modified elements only and fail only on binary-incompatible changes
onlyModified = true
onlyBinaryIncompatibleModified = true
failOnModification = true
ignoreMissingClasses = true
failOnSourceIncompatibility = true
annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi']
annotationExcludes = ['@org.opensearch.common.annotation.InternalApi', '@org.opensearch.common.annotation.ExperimentalApi']
txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt")
htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html")
xmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.xml")
dependsOn downloadJapicmpCompareTarget
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,15 +86,15 @@ public void testAutoForceMergeFeatureFlagDisabled() throws InterruptedException,
assertNotNull(shard);

// Before stats
SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false);
SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false, false);

// This is to make sure the auto force merge action gets triggered multiple times and succeeds at least once.
Thread.sleep(TimeValue.parseTimeValue(SCHEDULER_INTERVAL, "test").getMillis() * 3);
// refresh to clear old segments
flushAndRefresh(INDEX_NAME_1);

// After stats
SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false);
SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false, false);
assertEquals(segmentsStatsBefore.getCount(), segmentsStatsAfter.getCount());

// Deleting the index (so that ref count drops to zero for all the files) and then pruning the cache to clear it to avoid any file
Expand Down Expand Up @@ -123,9 +123,9 @@ public void testAutoForceMergeTriggeringWithOneShardOfNonWarmCandidate() throws
}
IndexShard shard = getIndexShard(dataNode, INDEX_NAME_1);
assertNotNull(shard);
SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false);
SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false, false);
Thread.sleep(TimeValue.parseTimeValue(SCHEDULER_INTERVAL, "test").getMillis() * 3);
SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false);
SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false, false);
assertEquals(segmentsStatsBefore.getCount(), segmentsStatsAfter.getCount());
assertAcked(client().admin().indices().prepareDelete(INDEX_NAME_1).get());
}
Expand All @@ -150,9 +150,9 @@ public void testAutoForceMergeTriggeringBasicWithOneShard() throws Exception {
}
IndexShard shard = getIndexShard(dataNode, INDEX_NAME_1);
assertNotNull(shard);
SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false);
waitUntil(() -> shard.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false);
SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false, false);
waitUntil(() -> shard.segmentStats(false, false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false, false);
assertTrue((int) segmentsStatsBefore.getCount() > segmentsStatsAfter.getCount());
assertEquals((int) SEGMENT_COUNT, segmentsStatsAfter.getCount());
assertAcked(client().admin().indices().prepareDelete(INDEX_NAME_1).get());
Expand Down Expand Up @@ -211,26 +211,26 @@ public void testAutoForceMergeTriggeringBasicWithFiveShardsOfTwoIndex() throws E
assertNotNull(shard4);
assertNotNull(shard5);

SegmentsStats segmentsStatsForShard1Before = shard1.segmentStats(false, false);
SegmentsStats segmentsStatsForShard2Before = shard2.segmentStats(false, false);
SegmentsStats segmentsStatsForShard3Before = shard3.segmentStats(false, false);
SegmentsStats segmentsStatsForShard4Before = shard4.segmentStats(false, false);
SegmentsStats segmentsStatsForShard5Before = shard5.segmentStats(false, false);
SegmentsStats segmentsStatsForShard1Before = shard1.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard2Before = shard2.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard3Before = shard3.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard4Before = shard4.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard5Before = shard5.segmentStats(false, false, false);
AtomicLong totalSegmentsBefore = new AtomicLong(
segmentsStatsForShard1Before.getCount() + segmentsStatsForShard2Before.getCount() + segmentsStatsForShard3Before.getCount()
+ segmentsStatsForShard4Before.getCount() + segmentsStatsForShard5Before.getCount()
);
assertTrue(totalSegmentsBefore.get() > 5);
waitUntil(() -> shard1.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard2.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard3.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard4.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard5.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
SegmentsStats segmentsStatsForShard1After = shard1.segmentStats(false, false);
SegmentsStats segmentsStatsForShard2After = shard2.segmentStats(false, false);
SegmentsStats segmentsStatsForShard3After = shard3.segmentStats(false, false);
SegmentsStats segmentsStatsForShard4After = shard4.segmentStats(false, false);
SegmentsStats segmentsStatsForShard5After = shard5.segmentStats(false, false);
waitUntil(() -> shard1.segmentStats(false, false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard2.segmentStats(false, false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard3.segmentStats(false, false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard4.segmentStats(false, false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
waitUntil(() -> shard5.segmentStats(false, false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES);
SegmentsStats segmentsStatsForShard1After = shard1.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard2After = shard2.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard3After = shard3.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard4After = shard4.segmentStats(false, false, false);
SegmentsStats segmentsStatsForShard5After = shard5.segmentStats(false, false, false);
AtomicLong totalSegmentsAfter = new AtomicLong(
segmentsStatsForShard1After.getCount() + segmentsStatsForShard2After.getCount() + segmentsStatsForShard3After.getCount()
+ segmentsStatsForShard4After.getCount() + segmentsStatsForShard5After.getCount()
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.index.engine;

import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
import org.opensearch.test.OpenSearchIntegTestCase;

import java.util.Map;

import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.hamcrest.Matchers.greaterThan;

/**
 * Integration test for field-level segment statistics.
 *
 * Verifies that enabling {@code includeFieldLevelSegmentFileSizes} on an
 * {@link IndicesStatsRequest} (with segment stats requested) populates
 * per-field Lucene file-size maps, for both single- and multi-index requests,
 * and that the maps stay empty when the flag is off (backward compatibility).
 */
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2)
public class FieldLevelSegmentStatsIT extends OpenSearchIntegTestCase {

    /**
     * Comprehensive test for field-level segment stats API covering single and multiple indices,
     * plus a backward-compatibility check with the flag disabled.
     */
    public void testFieldLevelSegmentStatsEndToEnd() {
        String index1 = "test-field-stats";
        String index2 = "test-index-2";

        // Create first index with explicit mapping covering several field types.
        assertAcked(
            prepareCreate(index1).setMapping(
                "title",
                "type=text",
                "keyword_field",
                "type=keyword",
                "numeric_field",
                "type=long",
                "date_field",
                "type=date"
            )
        );

        // Create second index with different mapping
        assertAcked(prepareCreate(index2).setMapping("field1", "type=text", "field2", "type=keyword"));

        // Index documents to first index
        int numDocs = 100;
        for (int i = 0; i < numDocs; i++) {
            client().prepareIndex(index1)
                .setId(String.valueOf(i))
                .setSource(
                    "title",
                    "Document title " + i,
                    "keyword_field",
                    "keyword_" + (i % 10),
                    "numeric_field",
                    i,
                    "date_field",
                    // Day-of-month 01..28, zero-padded so the value parses as a date.
                    "2025-01-" + ((i % 28) + 1 < 10 ? "0" + ((i % 28) + 1) : String.valueOf((i % 28) + 1))
                )
                .get();
        }

        // Index documents to second index
        for (int i = 0; i < 50; i++) {
            client().prepareIndex(index2).setId(String.valueOf(i)).setSource("field1", "text " + i, "field2", "keyword" + i).get();
        }

        refresh(index1, index2);

        // Test single index with field-level stats
        IndicesStatsRequest singleIndexRequest = new IndicesStatsRequest();
        singleIndexRequest.indices(index1);
        singleIndexRequest.segments(true);
        singleIndexRequest.includeFieldLevelSegmentFileSizes(true);

        IndicesStatsResponse singleIndexResponse = client().admin().indices().stats(singleIndexRequest).actionGet();

        SegmentsStats segmentStats = singleIndexResponse.getIndex(index1).getTotal().getSegments();
        assertNotNull("Segment stats should not be null", segmentStats);

        Map<String, Map<String, Long>> fieldLevelStats = segmentStats.getFieldLevelFileSizes();
        assertNotNull("Field-level stats should not be null", fieldLevelStats);
        assertFalse("Field-level stats should not be empty", fieldLevelStats.isEmpty());

        // Verify expected fields are present
        assertTrue("Should have stats for title", fieldLevelStats.containsKey("title"));
        assertTrue("Should have stats for keyword_field", fieldLevelStats.containsKey("keyword_field"));
        assertTrue("Should have stats for numeric_field", fieldLevelStats.containsKey("numeric_field"));
        assertTrue("Should have stats for date_field", fieldLevelStats.containsKey("date_field"));

        // Verify stats have positive values
        for (Map.Entry<String, Map<String, Long>> fieldEntry : fieldLevelStats.entrySet()) {
            assertFalse("Field " + fieldEntry.getKey() + " should have file stats", fieldEntry.getValue().isEmpty());
            for (Map.Entry<String, Long> fileEntry : fieldEntry.getValue().entrySet()) {
                assertThat("File sizes should be positive", fileEntry.getValue(), greaterThan(0L));
            }
        }

        // Test multiple indices
        IndicesStatsRequest multiIndexRequest = new IndicesStatsRequest();
        multiIndexRequest.indices(index1, index2);
        multiIndexRequest.segments(true);
        multiIndexRequest.includeFieldLevelSegmentFileSizes(true);

        IndicesStatsResponse multiIndexResponse = client().admin().indices().stats(multiIndexRequest).actionGet();

        // Verify both indices have field-level stats.
        // Use assertNotNull rather than a bare `assert`: Java assertions are no-ops
        // unless the JVM runs with -ea, so a bare assert could silently skip the check
        // and let the subsequent dereference throw an unhelpful NPE.
        SegmentsStats stats1 = multiIndexResponse.getIndex(index1).getTotal().getSegments();
        assertNotNull("Index1 segment stats should not be null", stats1);
        assertFalse("Index1 field-level stats should not be empty", stats1.getFieldLevelFileSizes().isEmpty());
        assertTrue("Index1 should have stats for title", stats1.getFieldLevelFileSizes().containsKey("title"));

        SegmentsStats stats2 = multiIndexResponse.getIndex(index2).getTotal().getSegments();
        assertNotNull("Index2 segment stats should not be null", stats2);
        assertFalse("Index2 field-level stats should not be empty", stats2.getFieldLevelFileSizes().isEmpty());
        assertTrue("Index2 should have stats for field1", stats2.getFieldLevelFileSizes().containsKey("field1"));
        assertTrue("Index2 should have stats for field2", stats2.getFieldLevelFileSizes().containsKey("field2"));

        // Test backward compatibility (without field-level stats)
        IndicesStatsRequest normalRequest = new IndicesStatsRequest();
        normalRequest.indices(index1);
        normalRequest.segments(true);
        normalRequest.includeFieldLevelSegmentFileSizes(false);

        IndicesStatsResponse normalResponse = client().admin().indices().stats(normalRequest).actionGet();
        SegmentsStats normalSegmentStats = normalResponse.getIndex(index1).getTotal().getSegments();

        assertNotNull("Normal segment stats should not be null", normalSegmentStats);
        assertTrue("Normal request should not include field-level stats", normalSegmentStats.getFieldLevelFileSizes().isEmpty());
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,11 @@ public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, C
completion = indexShard.completionStats(flags.completionDataFields());
break;
case Segments:
segments = indexShard.segmentStats(flags.includeSegmentFileSizes(), flags.includeUnloadedSegments());
segments = indexShard.segmentStats(
flags.includeSegmentFileSizes(),
flags.includeFieldLevelSegmentFileSizes(),
flags.includeUnloadedSegments()
);
break;
case Translog:
translog = indexShard.translogStats();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ public class CommonStatsFlags implements Writeable, Cloneable {
private String[] fieldDataFields = null;
private String[] completionDataFields = null;
private boolean includeSegmentFileSizes = false;
private boolean includeFieldLevelSegmentFileSizes = false;
private boolean includeUnloadedSegments = false;
private boolean includeAllShardIndexingPressureTrackers = false;
private boolean includeOnlyTopIndexingPressureMetrics = false;
Expand Down Expand Up @@ -94,6 +95,11 @@ public CommonStatsFlags(StreamInput in) throws IOException {
fieldDataFields = in.readStringArray();
completionDataFields = in.readStringArray();
includeSegmentFileSizes = in.readBoolean();
if (in.getVersion().onOrAfter(Version.V_3_2_0)) {
includeFieldLevelSegmentFileSizes = in.readBoolean();
} else {
includeFieldLevelSegmentFileSizes = false;
}
includeUnloadedSegments = in.readBoolean();
includeAllShardIndexingPressureTrackers = in.readBoolean();
includeOnlyTopIndexingPressureMetrics = in.readBoolean();
Expand Down Expand Up @@ -121,6 +127,9 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeStringArrayNullable(fieldDataFields);
out.writeStringArrayNullable(completionDataFields);
out.writeBoolean(includeSegmentFileSizes);
if (out.getVersion().onOrAfter(Version.V_3_2_0)) {
out.writeBoolean(includeFieldLevelSegmentFileSizes);
}
out.writeBoolean(includeUnloadedSegments);
out.writeBoolean(includeAllShardIndexingPressureTrackers);
out.writeBoolean(includeOnlyTopIndexingPressureMetrics);
Expand All @@ -142,6 +151,7 @@ public CommonStatsFlags all() {
fieldDataFields = null;
completionDataFields = null;
includeSegmentFileSizes = false;
includeFieldLevelSegmentFileSizes = false;
includeUnloadedSegments = false;
includeAllShardIndexingPressureTrackers = false;
includeOnlyTopIndexingPressureMetrics = false;
Expand All @@ -159,6 +169,7 @@ public CommonStatsFlags clear() {
fieldDataFields = null;
completionDataFields = null;
includeSegmentFileSizes = false;
includeFieldLevelSegmentFileSizes = false;
includeUnloadedSegments = false;
includeAllShardIndexingPressureTrackers = false;
includeOnlyTopIndexingPressureMetrics = false;
Expand Down Expand Up @@ -223,6 +234,11 @@ public CommonStatsFlags includeSegmentFileSizes(boolean includeSegmentFileSizes)
return this;
}

/**
 * Sets whether segment stats should report the disk usage of Lucene index files
 * broken down per field (only meaningful when segment stats are requested).
 *
 * @param includeFieldLevelSegmentFileSizes true to include per-field file sizes
 * @return this instance, for fluent chaining
 */
public CommonStatsFlags includeFieldLevelSegmentFileSizes(boolean includeFieldLevelSegmentFileSizes) {
this.includeFieldLevelSegmentFileSizes = includeFieldLevelSegmentFileSizes;
return this;
}

public CommonStatsFlags includeUnloadedSegments(boolean includeUnloadedSegments) {
this.includeUnloadedSegments = includeUnloadedSegments;
return this;
Expand Down Expand Up @@ -269,6 +285,10 @@ public boolean includeSegmentFileSizes() {
return this.includeSegmentFileSizes;
}

/**
 * @return true if segment stats should report per-field Lucene file sizes
 */
public boolean includeFieldLevelSegmentFileSizes() {
return this.includeFieldLevelSegmentFileSizes;
}

/**
 * Plain (non-fluent) setter for the includeIndicesStatsByLevel flag.
 * NOTE(review): presumably controls aggregation level of indices stats responses — confirm against callers.
 */
public void setIncludeIndicesStatsByLevel(boolean includeIndicesStatsByLevel) {
this.includeIndicesStatsByLevel = includeIndicesStatsByLevel;
}
Expand Down
Loading
Loading