Commit 29c5d35

Fix failures for EXPLAIN and EXPLAIN ANALYZE on Iceberg OPTIMIZE queries
1. Fixed a server-side failure in EXPLAIN: when IGNORE_STATS_CALCULATOR_FAILURES is set to false, EXPLAIN of an Iceberg OPTIMIZE query failed during statistics calculation.
2. Fixed the failure in EXPLAIN ANALYZE on OPTIMIZE queries.
1 parent 4b67d9f commit 29c5d35
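
Both failures can be reproduced by running EXPLAIN and EXPLAIN ANALYZE against an Iceberg OPTIMIZE statement with the ignore_stats_calculator_failures session property set to false. A minimal sketch in the style of the connector tests added below (the table name is illustrative; Session, assertUpdate, assertExplain and randomNameSuffix are the existing test-framework helpers):

    Session strictStats = Session.builder(getSession())
            .setSystemProperty(IGNORE_STATS_CALCULATOR_FAILURES, "false")
            .build();

    String tableName = "test_optimize_explain" + randomNameSuffix();
    assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");

    // 1. Before this commit, statistics calculation on the OPTIMIZE table handle threw,
    //    and with IGNORE_STATS_CALCULATOR_FAILURES=false that failed the whole EXPLAIN.
    assertExplain(strictStats, "EXPLAIN ALTER TABLE " + tableName + " EXECUTE OPTIMIZE", ".*Output layout:.*");

    // 2. Before this commit, EXPLAIN ANALYZE failed plan validation because
    //    ExplainAnalyzeNode was not on the TableExecuteStructureValidator allow-list.
    assertExplain(strictStats, "EXPLAIN ANALYZE ALTER TABLE " + tableName + " EXECUTE OPTIMIZE", ".*Output layout:.*");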

File tree: 4 files changed (+287 -3 lines)

core/trino-main/src/main/java/io/trino/sql/planner/sanity/TableExecuteStructureValidator.java

Lines changed: 3 additions & 1 deletion
@@ -18,6 +18,7 @@
 import io.trino.sql.PlannerContext;
 import io.trino.sql.planner.AdaptivePlanner;
 import io.trino.sql.planner.plan.ExchangeNode;
+import io.trino.sql.planner.plan.ExplainAnalyzeNode;
 import io.trino.sql.planner.plan.OutputNode;
 import io.trino.sql.planner.plan.PlanNode;
 import io.trino.sql.planner.plan.ProjectNode;
@@ -65,6 +66,7 @@ private boolean isAllowedNode(PlanNode node)
                 || node instanceof TableExecuteNode
                 || node instanceof OutputNode
                 || node instanceof ExchangeNode
-                || node instanceof TableFinishNode;
+                || node instanceof TableFinishNode
+                || node instanceof ExplainAnalyzeNode;
     }
 }
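
For EXPLAIN ANALYZE, the planner inserts an ExplainAnalyzeNode between the OutputNode and the TableExecuteNode, so the validator's allow-list has to accept it; the new test below builds exactly that plan shape (OutputNode -> ExplainAnalyzeNode -> TableExecuteNode -> ExchangeNode -> TableFinishNode -> ExchangeNode -> ProjectNode -> TableScanNode). A rough sketch of how an allow-list check like this is applied to a whole plan (illustrative only, the actual validate() body is not part of this diff):

    // Illustrative traversal, not the validator's real code: in a plan that contains a
    // TableExecuteNode, every node must satisfy isAllowedNode or validation fails.
    private void checkPlanStructure(PlanNode node)
    {
        if (!isAllowedNode(node)) {
            throw new IllegalStateException("Unexpected " + node.getClass().getSimpleName() + " found in plan");
        }
        node.getSources().forEach(this::checkPlanStructure);
    }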

New test file: TestTableExecuteStructureValidator.java (package io.trino.sql.planner.sanity)

Lines changed: 243 additions & 0 deletions

@@ -0,0 +1,243 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.trino.sql.planner.sanity;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.trino.connector.CatalogHandle;
import io.trino.execution.warnings.WarningCollector;
import io.trino.metadata.TableExecuteHandle;
import io.trino.metadata.TableHandle;
import io.trino.plugin.tpch.TpchTableHandle;
import io.trino.plugin.tpch.TpchTransactionHandle;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.WriterScalingOptions;
import io.trino.spi.predicate.TupleDomain;
import io.trino.sql.ir.Constant;
import io.trino.sql.ir.Expression;
import io.trino.sql.planner.OrderingScheme;
import io.trino.sql.planner.Partitioning;
import io.trino.sql.planner.PartitioningScheme;
import io.trino.sql.planner.PlanNodeIdAllocator;
import io.trino.sql.planner.Symbol;
import io.trino.sql.planner.assertions.BasePlanTest;
import io.trino.sql.planner.plan.Assignments;
import io.trino.sql.planner.plan.ExchangeNode;
import io.trino.sql.planner.plan.ExplainAnalyzeNode;
import io.trino.sql.planner.plan.FilterNode;
import io.trino.sql.planner.plan.OutputNode;
import io.trino.sql.planner.plan.PlanNode;
import io.trino.sql.planner.plan.ProjectNode;
import io.trino.sql.planner.plan.StatisticAggregations;
import io.trino.sql.planner.plan.StatisticAggregationsDescriptor;
import io.trino.sql.planner.plan.TableExecuteNode;
import io.trino.sql.planner.plan.TableFinishNode;
import io.trino.sql.planner.plan.TableScanNode;
import io.trino.sql.planner.plan.TableWriterNode;
import io.trino.testing.TestingTableExecuteHandle;
import io.trino.testing.TestingTransactionHandle;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import java.util.List;
import java.util.Map;
import java.util.Optional;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.sql.planner.SystemPartitioningHandle.SINGLE_DISTRIBUTION;
import static io.trino.sql.planner.TestingPlannerContext.PLANNER_CONTEXT;
import static io.trino.sql.planner.plan.ExchangeNode.Scope.LOCAL;
import static io.trino.sql.planner.plan.ExchangeNode.Type.REPARTITION;
import static io.trino.testing.TestingHandles.TEST_CATALOG_HANDLE;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

final class TestTableExecuteStructureValidator
        extends BasePlanTest
{
    private final PlanNodeIdAllocator idAllocator = new PlanNodeIdAllocator();
    private final TableExecuteStructureValidator validator = new TableExecuteStructureValidator();
    private final Expression predicate = new Constant(BIGINT, 1L);
    private final Symbol symbol = new Symbol(BIGINT, "bigint");
    private final List<Symbol> symbols = ImmutableList.of(symbol);
    private final List<String> columnNames = ImmutableList.of("bigint");
    private final Assignments assignments = Assignments.of(symbol, predicate);
    private final Map<Symbol, ColumnHandle> assignmentsMap = ImmutableMap.of(symbol, new ColumnHandle() {});
    private final Optional<OrderingScheme> orderingSchema = Optional.empty();
    private final Optional<PartitioningScheme> partitioningSchema = Optional.empty();
    private final Optional<StatisticAggregations> statisticAggregations = Optional.empty();
    private final Optional<StatisticAggregationsDescriptor<Symbol>> statisticsAggregationDescriptor = Optional.empty();

    private TableScanNode tableScanNode;
    private TableWriterNode.TableExecuteTarget tableExecuteTarget;
    private PartitioningScheme partitioningScheme;

    @BeforeAll
    void setup()
    {
        CatalogHandle catalogHandle = getCurrentCatalogHandle();

        TableHandle nationTableHandle = new TableHandle(
                catalogHandle,
                new TpchTableHandle("sf1", "nation", 1.0),
                TpchTransactionHandle.INSTANCE);

        tableScanNode = new TableScanNode(
                idAllocator.getNextId(),
                nationTableHandle,
                symbols,
                assignmentsMap,
                TupleDomain.all(),
                Optional.empty(),
                false,
                Optional.empty());

        tableExecuteTarget = new TableWriterNode.TableExecuteTarget(
                new TableExecuteHandle(
                        TEST_CATALOG_HANDLE,
                        TestingTransactionHandle.create(),
                        new TestingTableExecuteHandle()),
                Optional.empty(),
                new SchemaTableName("schemaName", "tableName"),
                WriterScalingOptions.DISABLED);

        partitioningScheme = new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, symbols), symbols);
    }

    @Test
    void testValidateSuccessfulWithExecuteNode()
    {
        PlanNode root = new OutputNode(idAllocator.getNextId(),
                new ExplainAnalyzeNode(idAllocator.getNextId(),
                        new TableExecuteNode(idAllocator.getNextId(),
                                new ExchangeNode(idAllocator.getNextId(),
                                        REPARTITION,
                                        LOCAL,
                                        partitioningScheme,
                                        ImmutableList.of(new TableFinishNode(idAllocator.getNextId(),
                                                new ExchangeNode(idAllocator.getNextId(),
                                                        REPARTITION,
                                                        LOCAL,
                                                        partitioningScheme,
                                                        ImmutableList.of(
                                                                new ProjectNode(idAllocator.getNextId(),
                                                                        tableScanNode,
                                                                        assignments)),
                                                        ImmutableList.of(symbols),
                                                        orderingSchema),
                                                tableExecuteTarget,
                                                symbol,
                                                statisticAggregations,
                                                statisticsAggregationDescriptor)),
                                        ImmutableList.of(symbols),
                                        orderingSchema),
                                tableExecuteTarget,
                                symbol,
                                symbol,
                                symbols,
                                columnNames,
                                partitioningSchema),
                        symbol,
                        symbols,
                        false),
                columnNames,
                symbols);
        validator.validate(root, null, PLANNER_CONTEXT, WarningCollector.NOOP);
    }

    @Test
    void testValidateSuccessfulWithoutExecuteNode()
    {
        PlanNode root = new OutputNode(idAllocator.getNextId(),
                new ExplainAnalyzeNode(idAllocator.getNextId(),
                        new ExchangeNode(idAllocator.getNextId(),
                                REPARTITION,
                                LOCAL,
                                partitioningScheme,
                                ImmutableList.of(new TableFinishNode(idAllocator.getNextId(),
                                        new ExchangeNode(idAllocator.getNextId(),
                                                REPARTITION,
                                                LOCAL,
                                                partitioningScheme,
                                                ImmutableList.of(
                                                        new ProjectNode(idAllocator.getNextId(),
                                                                tableScanNode,
                                                                assignments)),
                                                ImmutableList.of(symbols),
                                                orderingSchema),
                                        tableExecuteTarget,
                                        symbol,
                                        statisticAggregations,
                                        statisticsAggregationDescriptor)),
                                ImmutableList.of(symbols),
                                orderingSchema),
                        symbol,
                        symbols,
                        false),
                columnNames,
                symbols);
        validator.validate(root, null, PLANNER_CONTEXT, WarningCollector.NOOP);
    }

    @Test
    void testValidateFailed()
    {
        PlanNode root = new OutputNode(idAllocator.getNextId(),
                new ExplainAnalyzeNode(idAllocator.getNextId(),
                        new TableExecuteNode(idAllocator.getNextId(),
                                new ExchangeNode(idAllocator.getNextId(),
                                        REPARTITION,
                                        LOCAL,
                                        partitioningScheme,
                                        ImmutableList.of(new FilterNode(idAllocator.getNextId(),
                                                new ExchangeNode(idAllocator.getNextId(),
                                                        REPARTITION,
                                                        LOCAL,
                                                        partitioningScheme,
                                                        ImmutableList.of(new TableFinishNode(idAllocator.getNextId(),
                                                                new ExchangeNode(idAllocator.getNextId(),
                                                                        REPARTITION,
                                                                        LOCAL,
                                                                        partitioningScheme,
                                                                        ImmutableList.of(
                                                                                new ProjectNode(idAllocator.getNextId(),
                                                                                        tableScanNode,
                                                                                        assignments)),
                                                                        ImmutableList.of(symbols),
                                                                        orderingSchema),
                                                                tableExecuteTarget,
                                                                symbol,
                                                                statisticAggregations,
                                                                statisticsAggregationDescriptor)),
                                                        ImmutableList.of(symbols),
                                                        orderingSchema),
                                                predicate)),
                                        ImmutableList.of(symbols),
                                        orderingSchema),
                                tableExecuteTarget,
                                symbol,
                                symbol,
                                symbols,
                                columnNames,
                                partitioningSchema),
                        symbol,
                        symbols,
                        false),
                columnNames,
                symbols);
        assertThatThrownBy(() -> validator.validate(root, null, PLANNER_CONTEXT, WarningCollector.NOOP))
                .isInstanceOf(IllegalStateException.class)
                .hasMessage("Unexpected FilterNode found in plan; probably connector was not able to handle provided WHERE expression");
    }
}

plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java

Lines changed: 5 additions & 2 deletions
@@ -3541,9 +3541,12 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
         }

         IcebergTableHandle originalHandle = (IcebergTableHandle) tableHandle;
+
+        if (originalHandle.isRecordScannedFiles()) {
+            return TableStatistics.empty();
+        }
         // Certain table handle attributes are not applicable to select queries (which need stats).
         // If this changes, the caching logic may here may need to be revised.
-        checkArgument(!originalHandle.isRecordScannedFiles(), "Unexpected scanned files recording set");
         checkArgument(originalHandle.getMaxScannedFileSize().isEmpty(), "Unexpected max scanned file size set");

         IcebergTableHandle cacheKey = new IcebergTableHandle(
@@ -3565,7 +3568,7 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
                 false, // recordScannedFiles does not affect stats
                 originalHandle.getMaxScannedFileSize(),
                 ImmutableSet.of(), // constraintColumns do not affect stats
-                Optional.empty()); // forAnalyze does not affect stats
+                Optional.empty()); // forAnalyze does not affect stats
         return getIncrementally(
                 tableStatisticsCache,
                 cacheKey,
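
The table handle used for OPTIMIZE has recordScannedFiles set, and the engine's stats calculator still calls getTableStatistics on it while planning EXPLAIN, so the old checkArgument threw. With ignore_stats_calculator_failures=false that exception fails the query; returning TableStatistics.empty() instead simply reports unknown statistics. Roughly how the session property governs such failures on the engine side (illustrative sketch only, the real stats-calculator code and helper names may differ):

    // Illustrative only: a connector failure in getTableStatistics is swallowed unless
    // the ignore_stats_calculator_failures session property is set to false.
    TableStatistics statistics;
    try {
        statistics = metadata.getTableStatistics(session, tableHandle);
    }
    catch (RuntimeException e) {
        if (!isIgnoreStatsCalculatorFailures(session)) {
            throw e; // fails the whole query, e.g. the EXPLAIN fixed by this commit
        }
        statistics = TableStatistics.empty(); // fall back to unknown statistics
    }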

plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java

Lines changed: 36 additions & 0 deletions
@@ -113,6 +113,7 @@
 import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 import static io.trino.SystemSessionProperties.DETERMINE_PARTITION_COUNT_FOR_WRITE_ENABLED;
 import static io.trino.SystemSessionProperties.ENABLE_DYNAMIC_FILTERING;
+import static io.trino.SystemSessionProperties.IGNORE_STATS_CALCULATOR_FAILURES;
 import static io.trino.SystemSessionProperties.MAX_HASH_PARTITION_COUNT;
 import static io.trino.SystemSessionProperties.MAX_WRITER_TASK_COUNT;
 import static io.trino.SystemSessionProperties.SCALE_WRITERS;
@@ -6613,6 +6614,34 @@ public void testExpireSnapshotsParameterValidation()
                 "\\QRetention specified (33.00s) is shorter than the minimum retention configured in the system (7.00d). Minimum retention can be changed with iceberg.expire-snapshots.min-retention configuration property or iceberg.expire_snapshots_min_retention session property");
     }

+    @Test
+    public void testExplainOptimize()
+    {
+        Session sessionWithIgnoreStatsCalculatorFailuresFalse = withIgnoreStatsCalculatorFailuresFalse(getSession());
+
+        String tableName = "test_explain_optimize" + randomNameSuffix();
+        assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
+
+        assertExplain(sessionWithIgnoreStatsCalculatorFailuresFalse, "EXPLAIN ALTER TABLE " + tableName + " EXECUTE OPTIMIZE",
+                ".*Output layout:.*");
+    }
+
+    @Test
+    public void testExplainAnalyzeOptimize()
+    {
+        Session sessionWithIgnoreStatsCalculatorFailuresFalse = withIgnoreStatsCalculatorFailuresFalse(getSession());
+
+        String tableName = "test_explain_analyze_optimize" + randomNameSuffix();
+        assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
+
+        assertExplain(sessionWithIgnoreStatsCalculatorFailuresFalse, "EXPLAIN ANALYZE ALTER TABLE " + tableName + " EXECUTE OPTIMIZE",
+                ".*Output layout:.*");
+    }
+
     @Test
     public void testRemoveOrphanFilesWithUnexpectedMissingManifest()
             throws Exception
@@ -9321,6 +9350,13 @@ protected Session withoutSmallFileThreshold(Session session)
                 .build();
     }

+    protected Session withIgnoreStatsCalculatorFailuresFalse(Session session)
+    {
+        return Session.builder(session)
+                .setSystemProperty(IGNORE_STATS_CALCULATOR_FAILURES, "false")
+                .build();
+    }
+
     private Session withSingleWriterPerTask(Session session)
     {
         return Session.builder(session)