@@ -18,6 +18,7 @@
 import io.trino.sql.PlannerContext;
 import io.trino.sql.planner.AdaptivePlanner;
 import io.trino.sql.planner.plan.ExchangeNode;
+import io.trino.sql.planner.plan.ExplainAnalyzeNode;
 import io.trino.sql.planner.plan.OutputNode;
 import io.trino.sql.planner.plan.PlanNode;
 import io.trino.sql.planner.plan.ProjectNode;
@@ -65,6 +66,7 @@ private boolean isAllowedNode(PlanNode node)
                 || node instanceof TableExecuteNode
                 || node instanceof OutputNode
                 || node instanceof ExchangeNode
-                || node instanceof TableFinishNode;
+                || node instanceof TableFinishNode
+                || node instanceof ExplainAnalyzeNode;
     }
 }
@@ -0,0 +1,243 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.sql.planner.sanity;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.trino.connector.CatalogHandle;
import io.trino.execution.warnings.WarningCollector;
import io.trino.metadata.TableExecuteHandle;
import io.trino.metadata.TableHandle;
import io.trino.plugin.tpch.TpchTableHandle;
import io.trino.plugin.tpch.TpchTransactionHandle;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.WriterScalingOptions;
import io.trino.spi.predicate.TupleDomain;
import io.trino.sql.ir.Constant;
import io.trino.sql.ir.Expression;
import io.trino.sql.planner.OrderingScheme;
import io.trino.sql.planner.Partitioning;
import io.trino.sql.planner.PartitioningScheme;
import io.trino.sql.planner.PlanNodeIdAllocator;
import io.trino.sql.planner.Symbol;
import io.trino.sql.planner.assertions.BasePlanTest;
import io.trino.sql.planner.plan.Assignments;
import io.trino.sql.planner.plan.ExchangeNode;
import io.trino.sql.planner.plan.ExplainAnalyzeNode;
import io.trino.sql.planner.plan.FilterNode;
import io.trino.sql.planner.plan.OutputNode;
import io.trino.sql.planner.plan.PlanNode;
import io.trino.sql.planner.plan.ProjectNode;
import io.trino.sql.planner.plan.StatisticAggregations;
import io.trino.sql.planner.plan.StatisticAggregationsDescriptor;
import io.trino.sql.planner.plan.TableExecuteNode;
import io.trino.sql.planner.plan.TableFinishNode;
import io.trino.sql.planner.plan.TableScanNode;
import io.trino.sql.planner.plan.TableWriterNode;
import io.trino.testing.TestingTableExecuteHandle;
import io.trino.testing.TestingTransactionHandle;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import java.util.List;
import java.util.Map;
import java.util.Optional;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.sql.planner.SystemPartitioningHandle.SINGLE_DISTRIBUTION;
import static io.trino.sql.planner.TestingPlannerContext.PLANNER_CONTEXT;
import static io.trino.sql.planner.plan.ExchangeNode.Scope.LOCAL;
import static io.trino.sql.planner.plan.ExchangeNode.Type.REPARTITION;
import static io.trino.testing.TestingHandles.TEST_CATALOG_HANDLE;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

final class TestTableExecuteStructureValidator
        extends BasePlanTest
{
    private final PlanNodeIdAllocator idAllocator = new PlanNodeIdAllocator();
    private final TableExecuteStructureValidator validator = new TableExecuteStructureValidator();
    private final Expression predicate = new Constant(BIGINT, 1L);
    private final Symbol symbol = new Symbol(BIGINT, "bigint");
    private final List<Symbol> symbols = ImmutableList.of(symbol);
    private final List<String> columnNames = ImmutableList.of("bigint");
    private final Assignments assignments = Assignments.of(symbol, predicate);
    private final Map<Symbol, ColumnHandle> assignmentsMap = ImmutableMap.of(symbol, new ColumnHandle() {});
    private final Optional<OrderingScheme> orderingSchema = Optional.empty();
    private final Optional<PartitioningScheme> partitioningSchema = Optional.empty();
    private final Optional<StatisticAggregations> statisticAggregations = Optional.empty();
    private final Optional<StatisticAggregationsDescriptor<Symbol>> statisticsAggregationDescriptor = Optional.empty();

    private TableScanNode tableScanNode;
    private TableWriterNode.TableExecuteTarget tableExecuteTarget;
    private PartitioningScheme partitioningScheme;

    @BeforeAll
    void setup()
    {
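        // Shared fixtures for all test plans: a TPCH nation table scan and a testing TableExecuteTarget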
        CatalogHandle catalogHandle = getCurrentCatalogHandle();

        TableHandle nationTableHandle = new TableHandle(
                catalogHandle,
                new TpchTableHandle("sf1", "nation", 1.0),
                TpchTransactionHandle.INSTANCE);

        tableScanNode = new TableScanNode(
                idAllocator.getNextId(),
                nationTableHandle,
                symbols,
                assignmentsMap,
                TupleDomain.all(),
                Optional.empty(),
                false,
                Optional.empty());

        tableExecuteTarget = new TableWriterNode.TableExecuteTarget(
                new TableExecuteHandle(
                        TEST_CATALOG_HANDLE,
                        TestingTransactionHandle.create(),
                        new TestingTableExecuteHandle()),
                Optional.empty(),
                new SchemaTableName("schemaName", "tableName"),
                WriterScalingOptions.DISABLED);

        partitioningScheme = new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, symbols), symbols);
    }

    @Test
    void testValidateSuccessfulWithExecuteNode()
    {
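        // Plan shape: Output -> ExplainAnalyze -> TableExecute -> Exchange -> TableFinish -> Exchange -> Project -> TableScan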
        PlanNode root = new OutputNode(idAllocator.getNextId(),
                new ExplainAnalyzeNode(idAllocator.getNextId(),
                        new TableExecuteNode(idAllocator.getNextId(),
                                new ExchangeNode(idAllocator.getNextId(),
                                        REPARTITION,
                                        LOCAL,
                                        partitioningScheme,
                                        ImmutableList.of(new TableFinishNode(idAllocator.getNextId(),
                                                new ExchangeNode(idAllocator.getNextId(),
                                                        REPARTITION,
                                                        LOCAL,
                                                        partitioningScheme,
                                                        ImmutableList.of(
                                                                new ProjectNode(idAllocator.getNextId(),
                                                                        tableScanNode,
                                                                        assignments)),
                                                        ImmutableList.of(symbols),
                                                        orderingSchema),
                                                tableExecuteTarget,
                                                symbol,
                                                statisticAggregations,
                                                statisticsAggregationDescriptor)),
                                        ImmutableList.of(symbols),
                                        orderingSchema),
                                tableExecuteTarget,
                                symbol,
                                symbol,
                                symbols,
                                columnNames,
                                partitioningSchema),
                        symbol,
                        symbols,
                        false),
                columnNames,
                symbols);
        validator.validate(root, null, PLANNER_CONTEXT, WarningCollector.NOOP);
    }

    @Test
    void testValidateSuccessfulWithoutExecuteNode()
    {
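        // Same plan without a TableExecuteNode: Output -> ExplainAnalyze -> Exchange -> TableFinish -> Exchange -> Project -> TableScan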
        PlanNode root = new OutputNode(idAllocator.getNextId(),
                new ExplainAnalyzeNode(idAllocator.getNextId(),
                        new ExchangeNode(idAllocator.getNextId(),
                                REPARTITION,
                                LOCAL,
                                partitioningScheme,
                                ImmutableList.of(new TableFinishNode(idAllocator.getNextId(),
                                        new ExchangeNode(idAllocator.getNextId(),
                                                REPARTITION,
                                                LOCAL,
                                                partitioningScheme,
                                                ImmutableList.of(
                                                        new ProjectNode(idAllocator.getNextId(),
                                                                tableScanNode,
                                                                assignments)),
                                                ImmutableList.of(symbols),
                                                orderingSchema),
                                        tableExecuteTarget,
                                        symbol,
                                        statisticAggregations,
                                        statisticsAggregationDescriptor)),
                                ImmutableList.of(symbols),
                                orderingSchema),
                        symbol,
                        symbols,
                        false),
                columnNames,
                symbols);
        validator.validate(root, null, PLANNER_CONTEXT, WarningCollector.NOOP);
    }

    @Test
    void testValidateFailed()
    {
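        // Plan shape: Output -> ExplainAnalyze -> TableExecute -> Exchange -> Filter -> Exchange -> TableFinish -> Exchange -> Project -> TableScan
        // The FilterNode is not an allowed node in a table execute plan, so validation is expected to fail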
        PlanNode root = new OutputNode(idAllocator.getNextId(),
                new ExplainAnalyzeNode(idAllocator.getNextId(),
                        new TableExecuteNode(idAllocator.getNextId(),
                                new ExchangeNode(idAllocator.getNextId(),
                                        REPARTITION,
                                        LOCAL,
                                        partitioningScheme,
                                        ImmutableList.of(new FilterNode(idAllocator.getNextId(),
                                                new ExchangeNode(idAllocator.getNextId(),
                                                        REPARTITION,
                                                        LOCAL,
                                                        partitioningScheme,
                                                        ImmutableList.of(new TableFinishNode(idAllocator.getNextId(),
                                                                new ExchangeNode(idAllocator.getNextId(),
                                                                        REPARTITION,
                                                                        LOCAL,
                                                                        partitioningScheme,
                                                                        ImmutableList.of(
                                                                                new ProjectNode(idAllocator.getNextId(),
                                                                                        tableScanNode,
                                                                                        assignments)),
                                                                        ImmutableList.of(symbols),
                                                                        orderingSchema),
                                                                tableExecuteTarget,
                                                                symbol,
                                                                statisticAggregations,
                                                                statisticsAggregationDescriptor)),
                                                        ImmutableList.of(symbols),
                                                        orderingSchema),
                                                predicate)),
                                        ImmutableList.of(symbols),
                                        orderingSchema),
                                tableExecuteTarget,
                                symbol,
                                symbol,
                                symbols,
                                columnNames,
                                partitioningSchema),
                        symbol,
                        symbols,
                        false),
                columnNames,
                symbols);
        assertThatThrownBy(() -> validator.validate(root, null, PLANNER_CONTEXT, WarningCollector.NOOP))
                .isInstanceOf(IllegalStateException.class)
                .hasMessage("Unexpected FilterNode found in plan; probably connector was not able to handle provided WHERE expression");
    }
}
@@ -3541,9 +3541,12 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
         }

         IcebergTableHandle originalHandle = (IcebergTableHandle) tableHandle;

-        if (originalHandle.isRecordScannedFiles()) {
-            return TableStatistics.empty();
-        }
+        // Certain table handle attributes are not applicable to select queries (which need stats).
+        // If this changes, the caching logic here may need to be revised.
+        checkArgument(!originalHandle.isRecordScannedFiles(), "Unexpected scanned files recording set");
+        checkArgument(originalHandle.getMaxScannedFileSize().isEmpty(), "Unexpected max scanned file size set");

         IcebergTableHandle cacheKey = new IcebergTableHandle(
@@ -113,6 +113,7 @@
 import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 import static io.trino.SystemSessionProperties.DETERMINE_PARTITION_COUNT_FOR_WRITE_ENABLED;
 import static io.trino.SystemSessionProperties.ENABLE_DYNAMIC_FILTERING;
+import static io.trino.SystemSessionProperties.IGNORE_STATS_CALCULATOR_FAILURES;
 import static io.trino.SystemSessionProperties.MAX_HASH_PARTITION_COUNT;
 import static io.trino.SystemSessionProperties.MAX_WRITER_TASK_COUNT;
 import static io.trino.SystemSessionProperties.SCALE_WRITERS;
@@ -6613,6 +6614,34 @@ public void testExpireSnapshotsParameterValidation()
                 "\\QRetention specified (33.00s) is shorter than the minimum retention configured in the system (7.00d). Minimum retention can be changed with iceberg.expire-snapshots.min-retention configuration property or iceberg.expire_snapshots_min_retention session property");
     }

+    @Test
+    public void testExplainOptimize()
+    {
+        Session sessionWithIgnoreStatsCalculatorFailuresFalse = withIgnoreStatsCalculatorFailuresFalse(getSession());
+
+        String tableName = "test_explain_optimize" + randomNameSuffix();
+        assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
+
+        assertExplain(sessionWithIgnoreStatsCalculatorFailuresFalse, "EXPLAIN ALTER TABLE " + tableName + " EXECUTE OPTIMIZE",
+                ".*Output layout:.*");
+    }
+
+    @Test
+    public void testExplainAnalyzeOptimize()
+    {
+        Session sessionWithIgnoreStatsCalculatorFailuresFalse = withIgnoreStatsCalculatorFailuresFalse(getSession());
+
+        String tableName = "test_explain_analyze_optimize" + randomNameSuffix();
+        assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
+        assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
+
+        assertExplain(sessionWithIgnoreStatsCalculatorFailuresFalse, "EXPLAIN ANALYZE ALTER TABLE " + tableName + " EXECUTE OPTIMIZE",
+                ".*Output layout:.*");
+    }
+
 @Test
 public void testRemoveOrphanFilesWithUnexpectedMissingManifest()
         throws Exception
@@ -9321,6 +9350,13 @@ protected Session withoutSmallFileThreshold(Session session)
                 .build();
     }

+    protected Session withIgnoreStatsCalculatorFailuresFalse(Session session)
+    {
+        return Session.builder(session)
+                .setSystemProperty(IGNORE_STATS_CALCULATOR_FAILURES, "false")
+                .build();
+    }
+
     private Session withSingleWriterPerTask(Session session)
     {
         return Session.builder(session)