Skip to content
This repository was archived by the owner on Mar 12, 2026. It is now read-only.

Commit 62ffffc

Browse files
deps: bump datafusion (#1445)
## Rationale Close #1461 ## Detailed Changes Bump datafusion to https://github.com/CeresDB/arrow-datafusion/commits/e21b03154, which is version 33. Some important breaking changes: - apache/datafusion#7920 - apache/datafusion#9109 ## Test Plan CI --------- Co-authored-by: jiacai2050 <dev@liujiacai.net>
1 parent 8156b32 commit 62ffffc

46 files changed

Lines changed: 685 additions & 370 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

Cargo.lock

Lines changed: 316 additions & 233 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -85,8 +85,8 @@ members = [
8585

8686
[workspace.dependencies]
8787
alloc_tracker = { path = "src/components/alloc_tracker" }
88-
arrow = { version = "43.0.0", features = ["prettyprint"] }
89-
arrow_ipc = { version = "43.0.0" }
88+
arrow = { version = "49.0.0", features = ["prettyprint"] }
89+
arrow_ipc = { version = "49.0.0" }
9090
arrow_ext = { path = "src/components/arrow_ext" }
9191
analytic_engine = { path = "src/analytic_engine" }
9292
arena = { path = "src/components/arena" }
@@ -107,8 +107,8 @@ cluster = { path = "src/cluster" }
107107
criterion = "0.5"
108108
horaedb-client = "1.0.2"
109109
common_types = { path = "src/common_types" }
110-
datafusion = { git = "https://github.com/CeresDB/arrow-datafusion.git", rev = "9c3a537e25e5ab3299922864034f67fb2f79805d" }
111-
datafusion-proto = { git = "https://github.com/CeresDB/arrow-datafusion.git", rev = "9c3a537e25e5ab3299922864034f67fb2f79805d" }
110+
datafusion = { git = "https://github.com/CeresDB/arrow-datafusion.git", rev = "e21b03154" }
111+
datafusion-proto = { git = "https://github.com/CeresDB/arrow-datafusion.git", rev = "e21b03154" }
112112
derive_builder = "0.12"
113113
df_operator = { path = "src/df_operator" }
114114
df_engine_extensions = { path = "src/df_engine_extensions" }
@@ -121,10 +121,10 @@ hash_ext = { path = "src/components/hash_ext" }
121121
hex = "0.4.3"
122122
hyperloglog = { git = "https://github.com/jedisct1/rust-hyperloglog.git", rev = "425487ce910f26636fbde8c4d640b538431aad50" }
123123
id_allocator = { path = "src/components/id_allocator" }
124-
influxql-logical-planner = { git = "https://github.com/CeresDB/influxql.git", rev = "a905863", package = "iox_query_influxql" }
125-
influxql-parser = { git = "https://github.com/CeresDB/influxql.git", rev = "a905863", package = "influxdb_influxql_parser" }
126-
influxql-query = { git = "https://github.com/CeresDB/influxql.git", rev = "a905863", package = "iox_query" }
127-
influxql-schema = { git = "https://github.com/CeresDB/influxql.git", rev = "a905863", package = "schema" }
124+
influxql-logical-planner = { git = "https://github.com/CeresDB/influxql.git", rev = "b9fb3ca", package = "iox_query_influxql" }
125+
influxql-parser = { git = "https://github.com/CeresDB/influxql.git", rev = "b9fb3ca", package = "influxdb_influxql_parser" }
126+
influxql-query = { git = "https://github.com/CeresDB/influxql.git", rev = "b9fb3ca", package = "iox_query" }
127+
influxql-schema = { git = "https://github.com/CeresDB/influxql.git", rev = "b9fb3ca", package = "schema" }
128128
interpreters = { path = "src/interpreters" }
129129
itertools = "0.10.5"
130130
lz4_flex = { version = "0.11", default-features = false, features = ["frame"] }
@@ -142,7 +142,7 @@ panic_ext = { path = "src/components/panic_ext" }
142142
partitioned_lock = { path = "src/components/partitioned_lock" }
143143
partition_table_engine = { path = "src/partition_table_engine" }
144144
parquet_ext = { path = "src/components/parquet_ext" }
145-
parquet = { version = "43.0.0" }
145+
parquet = { version = "49.0.0" }
146146
paste = "1.0"
147147
pin-project-lite = "0.2.8"
148148
pprof = "0.12.1"
@@ -172,9 +172,9 @@ size_ext = { path = "src/components/size_ext" }
172172
smallvec = "1.6"
173173
slog = "2.7"
174174
spin = "0.9.6"
175-
sqlparser = { version = "0.35", features = ["serde"] }
176-
system_catalog = { path = "src/system_catalog" }
177175
system_statis = { path = "src/components/system_stats" }
176+
sqlparser = { version = "0.39.0", features = ["serde"] }
177+
system_catalog = { path = "src/system_catalog" }
178178
table_engine = { path = "src/table_engine" }
179179
table_kv = { path = "src/components/table_kv" }
180180
tempfile = "3.1.0"

integration_tests/cases/common/dml/issue-1087.result

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ String("logical_plan after inline_table_scan"),String("SAME TEXT AS ABOVE"),
1717
String("logical_plan after type_coercion"),String("SAME TEXT AS ABOVE"),
1818
String("logical_plan after count_wildcard_rule"),String("SAME TEXT AS ABOVE"),
1919
String("analyzed_logical_plan"),String("SAME TEXT AS ABOVE"),
20+
String("logical_plan after eliminate_nested_union"),String("SAME TEXT AS ABOVE"),
2021
String("logical_plan after simplify_expressions"),String("SAME TEXT AS ABOVE"),
2122
String("logical_plan after unwrap_cast_in_comparison"),String("SAME TEXT AS ABOVE"),
2223
String("logical_plan after replace_distinct_aggregate"),String("SAME TEXT AS ABOVE"),
@@ -33,6 +34,7 @@ String("logical_plan after eliminate_cross_join"),String("SAME TEXT AS ABOVE"),
3334
String("logical_plan after common_sub_expression_eliminate"),String("SAME TEXT AS ABOVE"),
3435
String("logical_plan after eliminate_limit"),String("SAME TEXT AS ABOVE"),
3536
String("logical_plan after propagate_empty_relation"),String("SAME TEXT AS ABOVE"),
37+
String("logical_plan after eliminate_one_union"),String("SAME TEXT AS ABOVE"),
3638
String("logical_plan after filter_null_join_keys"),String("SAME TEXT AS ABOVE"),
3739
String("logical_plan after eliminate_outer_join"),String("SAME TEXT AS ABOVE"),
3840
String("logical_plan after push_down_limit"),String("SAME TEXT AS ABOVE"),
@@ -46,6 +48,7 @@ String("logical_plan after eliminate_projection"),String("TableScan: issue_1087
4648
String("logical_plan after push_down_limit"),String("SAME TEXT AS ABOVE"),
4749
String("logical_plan after influx_regex_to_datafusion_regex"),String("SAME TEXT AS ABOVE"),
4850
String("logical_plan after handle_gap_fill"),String("SAME TEXT AS ABOVE"),
51+
String("logical_plan after eliminate_nested_union"),String("SAME TEXT AS ABOVE"),
4952
String("logical_plan after simplify_expressions"),String("SAME TEXT AS ABOVE"),
5053
String("logical_plan after unwrap_cast_in_comparison"),String("SAME TEXT AS ABOVE"),
5154
String("logical_plan after replace_distinct_aggregate"),String("SAME TEXT AS ABOVE"),
@@ -62,6 +65,7 @@ String("logical_plan after eliminate_cross_join"),String("SAME TEXT AS ABOVE"),
6265
String("logical_plan after common_sub_expression_eliminate"),String("SAME TEXT AS ABOVE"),
6366
String("logical_plan after eliminate_limit"),String("SAME TEXT AS ABOVE"),
6467
String("logical_plan after propagate_empty_relation"),String("SAME TEXT AS ABOVE"),
68+
String("logical_plan after eliminate_one_union"),String("SAME TEXT AS ABOVE"),
6569
String("logical_plan after filter_null_join_keys"),String("SAME TEXT AS ABOVE"),
6670
String("logical_plan after eliminate_outer_join"),String("SAME TEXT AS ABOVE"),
6771
String("logical_plan after push_down_limit"),String("SAME TEXT AS ABOVE"),
@@ -76,17 +80,22 @@ String("logical_plan after push_down_limit"),String("SAME TEXT AS ABOVE"),
7680
String("logical_plan after influx_regex_to_datafusion_regex"),String("SAME TEXT AS ABOVE"),
7781
String("logical_plan after handle_gap_fill"),String("SAME TEXT AS ABOVE"),
7882
String("logical_plan"),String("TableScan: issue_1087 projection=[tsid, t, name, value]"),
79-
String("initial_physical_plan"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low\n"),
83+
String("initial_physical_plan"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
84+
String("initial_physical_plan_with_stats"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8), statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:),(Col[3]:)]]\n"),
85+
String("physical_plan after OutputRequirements"),String("OutputRequirementExec\n ScanTable: table=issue_1087, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
8086
String("physical_plan after aggregate_statistics"),String("SAME TEXT AS ABOVE"),
8187
String("physical_plan after join_selection"),String("SAME TEXT AS ABOVE"),
82-
String("physical_plan after PipelineFixer"),String("SAME TEXT AS ABOVE"),
83-
String("physical_plan after repartition"),String("SAME TEXT AS ABOVE"),
88+
String("physical_plan after LimitedDistinctAggregation"),String("SAME TEXT AS ABOVE"),
8489
String("physical_plan after EnforceDistribution"),String("SAME TEXT AS ABOVE"),
8590
String("physical_plan after CombinePartialFinalAggregate"),String("SAME TEXT AS ABOVE"),
8691
String("physical_plan after EnforceSorting"),String("SAME TEXT AS ABOVE"),
8792
String("physical_plan after coalesce_batches"),String("SAME TEXT AS ABOVE"),
93+
String("physical_plan after OutputRequirements"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
8894
String("physical_plan after PipelineChecker"),String("SAME TEXT AS ABOVE"),
89-
String("physical_plan"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low\n"),
95+
String("physical_plan after LimitAggregation"),String("SAME TEXT AS ABOVE"),
96+
String("physical_plan after ProjectionPushdown"),String("SAME TEXT AS ABOVE"),
97+
String("physical_plan"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
98+
String("physical_plan_with_stats"),String("ScanTable: table=issue_1087, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8), statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:),(Col[3]:)]]\n"),
9099

91100

92101
DROP TABLE `issue_1087`;

integration_tests/cases/common/dml/issue-302.result

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ affected_rows: 1
1212

1313
select `t`, count(distinct name) from issue302 group by `t`;
1414

15-
issue302.t,COUNT(DISTINCT issue302.name),
15+
t,COUNT(DISTINCT issue302.name),
1616
Timestamp(1651737067000),Int64(0),
1717

1818

integration_tests/cases/common/dml/issue-341.result

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ WHERE
5858

5959
plan_type,plan,
6060
String("logical_plan"),String("TableScan: issue341_t1 projection=[timestamp, value], full_filters=[issue341_t1.value = Int32(3)]"),
61-
String("physical_plan"),String("ScanTable: table=issue341_t1, parallelism=8, priority=Low\n"),
61+
String("physical_plan"),String("ScanTable: table=issue341_t1, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
6262

6363

6464
-- FilterExec node should not be in plan.
@@ -71,8 +71,8 @@ WHERE
7171
tag1 = "t3";
7272

7373
plan_type,plan,
74-
String("logical_plan"),String("Projection: issue341_t1.timestamp, issue341_t1.value\n TableScan: issue341_t1 projection=[timestamp, value, tag1], full_filters=[issue341_t1.tag1 = Utf8(\"t3\")]"),
75-
String("physical_plan"),String("ProjectionExec: expr=[timestamp@0 as timestamp, value@1 as value]\n ScanTable: table=issue341_t1, parallelism=8, priority=Low\n"),
74+
String("logical_plan"),String("TableScan: issue341_t1 projection=[timestamp, value], full_filters=[issue341_t1.tag1 = Utf8(\"t3\")]"),
75+
String("physical_plan"),String("ProjectionExec: expr=[timestamp@0 as timestamp, value@1 as value]\n ScanTable: table=issue341_t1, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
7676

7777

7878
-- Repeat operations above, but with overwrite table
@@ -116,7 +116,7 @@ WHERE
116116

117117
plan_type,plan,
118118
String("logical_plan"),String("Filter: issue341_t2.value = Float64(3)\n TableScan: issue341_t2 projection=[timestamp, value], partial_filters=[issue341_t2.value = Float64(3)]"),
119-
String("physical_plan"),String("CoalesceBatchesExec: target_batch_size=8192\n FilterExec: value@1 = 3\n ScanTable: table=issue341_t2, parallelism=8, priority=Low\n"),
119+
String("physical_plan"),String("CoalesceBatchesExec: target_batch_size=8192\n FilterExec: value@1 = 3\n ScanTable: table=issue341_t2, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
120120

121121

122122
-- When using tag as filter, FilterExec node should not be in plan.
@@ -129,8 +129,8 @@ WHERE
129129
tag1 = "t3";
130130

131131
plan_type,plan,
132-
String("logical_plan"),String("Projection: issue341_t2.timestamp, issue341_t2.value\n TableScan: issue341_t2 projection=[timestamp, value, tag1], full_filters=[issue341_t2.tag1 = Utf8(\"t3\")]"),
133-
String("physical_plan"),String("ProjectionExec: expr=[timestamp@0 as timestamp, value@1 as value]\n ScanTable: table=issue341_t2, parallelism=8, priority=Low\n"),
132+
String("logical_plan"),String("TableScan: issue341_t2 projection=[timestamp, value], full_filters=[issue341_t2.tag1 = Utf8(\"t3\")]"),
133+
String("physical_plan"),String("ProjectionExec: expr=[timestamp@0 as timestamp, value@1 as value]\n ScanTable: table=issue341_t2, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
134134

135135

136136
DROP TABLE IF EXISTS `issue341_t1`;

integration_tests/cases/common/dml/issue-59.result

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ FROM issue59
2424
GROUP BY id+1;
2525

2626
plan_type,plan,
27-
String("logical_plan"),String("Projection: group_alias_0 AS issue59.id + Int64(1), COUNT(alias1) AS COUNT(DISTINCT issue59.account)\n Aggregate: groupBy=[[group_alias_0]], aggr=[[COUNT(alias1)]]\n Projection: group_alias_0, alias1\n Aggregate: groupBy=[[CAST(issue59.id AS Int64) + Int64(1) AS group_alias_0, issue59.account AS alias1]], aggr=[[]]\n TableScan: issue59 projection=[id, account]"),
28-
String("physical_plan"),String("ProjectionExec: expr=[group_alias_0@0 as issue59.id + Int64(1), COUNT(alias1)@1 as COUNT(DISTINCT issue59.account)]\n AggregateExec: mode=FinalPartitioned, gby=[group_alias_0@0 as group_alias_0], aggr=[COUNT(alias1)]\n CoalesceBatchesExec: target_batch_size=8192\n RepartitionExec: partitioning=Hash([group_alias_0@0], 8), input_partitions=8\n AggregateExec: mode=Partial, gby=[group_alias_0@0 as group_alias_0], aggr=[COUNT(alias1)]\n ProjectionExec: expr=[group_alias_0@0 as group_alias_0, alias1@1 as alias1]\n AggregateExec: mode=FinalPartitioned, gby=[group_alias_0@0 as group_alias_0, alias1@1 as alias1], aggr=[]\n CoalesceBatchesExec: target_batch_size=8192\n RepartitionExec: partitioning=Hash([group_alias_0@0, alias1@1], 8), input_partitions=8\n AggregateExec: mode=Partial, gby=[CAST(id@0 AS Int64) + 1 as group_alias_0, account@1 as alias1], aggr=[]\n ScanTable: table=issue59, parallelism=8, priority=Low\n"),
27+
String("logical_plan"),String("Projection: group_alias_0 AS issue59.id + Int64(1), COUNT(alias1) AS COUNT(DISTINCT issue59.account)\n Aggregate: groupBy=[[group_alias_0]], aggr=[[COUNT(alias1)]]\n Aggregate: groupBy=[[CAST(issue59.id AS Int64) + Int64(1) AS group_alias_0, issue59.account AS alias1]], aggr=[[]]\n TableScan: issue59 projection=[id, account]"),
28+
String("physical_plan"),String("ProjectionExec: expr=[group_alias_0@0 as issue59.id + Int64(1), COUNT(alias1)@1 as COUNT(DISTINCT issue59.account)]\n AggregateExec: mode=FinalPartitioned, gby=[group_alias_0@0 as group_alias_0], aggr=[COUNT(alias1)]\n CoalesceBatchesExec: target_batch_size=8192\n RepartitionExec: partitioning=Hash([group_alias_0@0], 8), input_partitions=8\n AggregateExec: mode=Partial, gby=[group_alias_0@0 as group_alias_0], aggr=[COUNT(alias1)]\n AggregateExec: mode=FinalPartitioned, gby=[group_alias_0@0 as group_alias_0, alias1@1 as alias1], aggr=[]\n CoalesceBatchesExec: target_batch_size=8192\n RepartitionExec: partitioning=Hash([group_alias_0@0, alias1@1], 8), input_partitions=8\n AggregateExec: mode=Partial, gby=[CAST(id@0 AS Int64) + 1 as group_alias_0, account@1 as alias1], aggr=[]\n ScanTable: table=issue59, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
2929

3030

3131
DROP TABLE IF EXISTS issue59;

integration_tests/cases/common/explain/explain.result

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ EXPLAIN SELECT t FROM `04_explain_t`;
1010

1111
plan_type,plan,
1212
String("logical_plan"),String("TableScan: 04_explain_t projection=[t]"),
13-
String("physical_plan"),String("ScanTable: table=04_explain_t, parallelism=8, priority=Low\n"),
13+
String("physical_plan"),String("ScanTable: table=04_explain_t, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
1414

1515

1616
DROP TABLE `04_explain_t`;

integration_tests/cases/common/function/aggregate.result

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,50 @@ COUNT(DISTINCT 02_function_aggregate_table1.arch),
105105
Int64(2),
106106

107107

108+
CREATE TABLE `02_function_aggregate_table2` (
109+
`timestamp` timestamp NOT NULL,
110+
`arch` string TAG,
111+
`datacenter` string TAG,
112+
`value` int,
113+
`uvalue` uint64,
114+
timestamp KEY (timestamp)) ENGINE=Analytic
115+
WITH(
116+
enable_ttl='false',
117+
update_mode = 'append'
118+
);
119+
120+
affected_rows: 0
121+
122+
INSERT INTO `02_function_aggregate_table2`
123+
(`timestamp`, `arch`, `datacenter`, `value`, `uvalue`)
124+
VALUES
125+
(1658304762, 'x86-64', 'china', 100, 10),
126+
(1658304763, 'x86-64', 'china', 200, 10),
127+
(1658304762, 'arm64', 'china', 110, 0),
128+
(1658304763, 'arm64', 'china', 210, 0);
129+
130+
affected_rows: 4
131+
132+
-- This should select an empty column
133+
SELECT count(*) FROM `02_function_aggregate_table1`;
134+
135+
COUNT(*),
136+
Int64(4),
137+
138+
139+
-- Same as before, but query from sst
140+
-- SQLNESS ARG pre_cmd=flush
141+
SELECT count(*) FROM `02_function_aggregate_table1`;
142+
143+
COUNT(*),
144+
Int64(4),
145+
146+
108147
DROP TABLE `02_function_aggregate_table1`;
109148

110149
affected_rows: 0
111150

151+
DROP TABLE `02_function_aggregate_table2`;
152+
153+
affected_rows: 0
154+

integration_tests/cases/common/function/aggregate.sql

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,4 +57,32 @@ SELECT distinct(`arch`) FROM `02_function_aggregate_table1` ORDER BY `arch` DESC
5757

5858
SELECT count(distinct(`arch`)) FROM `02_function_aggregate_table1`;
5959

60+
CREATE TABLE `02_function_aggregate_table2` (
61+
`timestamp` timestamp NOT NULL,
62+
`arch` string TAG,
63+
`datacenter` string TAG,
64+
`value` int,
65+
`uvalue` uint64,
66+
timestamp KEY (timestamp)) ENGINE=Analytic
67+
WITH(
68+
enable_ttl='false',
69+
update_mode = 'append'
70+
);
71+
72+
INSERT INTO `02_function_aggregate_table2`
73+
(`timestamp`, `arch`, `datacenter`, `value`, `uvalue`)
74+
VALUES
75+
(1658304762, 'x86-64', 'china', 100, 10),
76+
(1658304763, 'x86-64', 'china', 200, 10),
77+
(1658304762, 'arm64', 'china', 110, 0),
78+
(1658304763, 'arm64', 'china', 210, 0);
79+
80+
-- This should select an empty column
81+
SELECT count(*) FROM `02_function_aggregate_table1`;
82+
83+
-- Same as before, but query from sst
84+
-- SQLNESS ARG pre_cmd=flush
85+
SELECT count(*) FROM `02_function_aggregate_table1`;
86+
6087
DROP TABLE `02_function_aggregate_table1`;
88+
DROP TABLE `02_function_aggregate_table2`;

integration_tests/cases/common/optimizer/optimizer.result

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ EXPLAIN SELECT max(value) AS c1, avg(value) AS c2 FROM `07_optimizer_t` GROUP BY
1010

1111
plan_type,plan,
1212
String("logical_plan"),String("Projection: MAX(07_optimizer_t.value) AS c1, AVG(07_optimizer_t.value) AS c2\n Aggregate: groupBy=[[07_optimizer_t.name]], aggr=[[MAX(07_optimizer_t.value), AVG(07_optimizer_t.value)]]\n TableScan: 07_optimizer_t projection=[name, value]"),
13-
String("physical_plan"),String("ProjectionExec: expr=[MAX(07_optimizer_t.value)@1 as c1, AVG(07_optimizer_t.value)@2 as c2]\n AggregateExec: mode=FinalPartitioned, gby=[name@0 as name], aggr=[MAX(07_optimizer_t.value), AVG(07_optimizer_t.value)]\n CoalesceBatchesExec: target_batch_size=8192\n RepartitionExec: partitioning=Hash([name@0], 8), input_partitions=8\n AggregateExec: mode=Partial, gby=[name@0 as name], aggr=[MAX(07_optimizer_t.value), AVG(07_optimizer_t.value)]\n ScanTable: table=07_optimizer_t, parallelism=8, priority=Low\n"),
13+
String("physical_plan"),String("ProjectionExec: expr=[MAX(07_optimizer_t.value)@1 as c1, AVG(07_optimizer_t.value)@2 as c2]\n AggregateExec: mode=FinalPartitioned, gby=[name@0 as name], aggr=[MAX(07_optimizer_t.value), AVG(07_optimizer_t.value)]\n CoalesceBatchesExec: target_batch_size=8192\n RepartitionExec: partitioning=Hash([name@0], 8), input_partitions=8\n AggregateExec: mode=Partial, gby=[name@0 as name], aggr=[MAX(07_optimizer_t.value), AVG(07_optimizer_t.value)]\n ScanTable: table=07_optimizer_t, parallelism=8, priority=Low, partition_count=UnknownPartitioning(8)\n"),
1414

1515

1616
DROP TABLE `07_optimizer_t`;

0 commit comments

Comments
 (0)