
Commit 46484ea

Weijun-HDandandan authored and committed
feat: add benchmarks for json parser (apache#9107)
# Which issue does this PR close?

- Closes #NNN.

# Rationale for this change

Add targeted JSON reader benchmarks to track performance for wide objects, hex-encoded binary inputs, and projection workloads.

# What changes are included in this PR?

- Add `arrow-json/benches/wide_object.rs` for wide-object decode/serialize benchmarks.
- Add `arrow-json/benches/binary_hex.rs` for hex string decoding into Binary/FixedSizeBinary/BinaryView.
- Add `arrow-json/benches/wide_projection.rs` for full vs projected schema decoding.

# Are these changes tested?

No

# Are there any user-facing changes?

No
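For local runs, Criterion's standard cargo integration applies; a minimal invocation (assuming an arrow-rs checkout containing this commit) might look like:

```sh
# Run the whole json-reader benchmark target added by this commit
cargo bench -p arrow-json --bench json-reader

# Or filter to a single workload by benchmark-name substring,
# e.g. the projection comparison
cargo bench -p arrow-json --bench json-reader -- decode_wide_projection
```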
1 parent e8919b1 commit 46484ea

File tree

2 files changed: +254 -0 lines changed


arrow-json/Cargo.toml

Lines changed: 4 additions & 0 deletions
@@ -65,3 +65,7 @@ rand = { version = "0.9", default-features = false, features = ["std", "std_rng"
 [[bench]]
 name = "serde"
 harness = false
+
+[[bench]]
+name = "json-reader"
+harness = false
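Setting `harness = false` opts the new target out of the default libtest bench harness, so the `criterion_main!` macro in `benches/json-reader.rs` can supply the executable's entry point; cargo infers that file path from the target name since no explicit `path` is given.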

arrow-json/benches/json-reader.rs

Lines changed: 250 additions & 0 deletions
@@ -0,0 +1,250 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use arrow_json::ReaderBuilder;
use arrow_json::reader::Decoder;
use arrow_schema::{DataType, Field, Schema};
use criterion::{
    BenchmarkId, Criterion, SamplingMode, Throughput, criterion_group, criterion_main,
};
use serde_json::{Map, Number, Value};
use std::fmt::Write;
use std::hint::black_box;
use std::sync::Arc;

const ROWS: usize = 1 << 17; // 128K rows
const BATCH_SIZE: usize = 1 << 13; // 8K rows per batch

const WIDE_FIELDS: usize = 64;
const BINARY_BYTES: usize = 64;
const WIDE_PROJECTION_TOTAL_FIELDS: usize = 100; // 100 fields total, select only 3

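// Pump the decoder over `data` in successive chunks, draining completed
// RecordBatches after each call; bail out if `decode` makes no progress.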
fn decode_and_flush(decoder: &mut Decoder, data: &[u8]) {
    let mut offset = 0;
    while offset < data.len() {
        let read = decoder.decode(black_box(&data[offset..])).unwrap();
        if read == 0 {
            break;
        }
        offset += read;
        while let Some(_batch) = decoder.flush().unwrap() {}
    }
}

fn build_schema(field_count: usize) -> Arc<Schema> {
    // Builds a schema with fields named f0..f{field_count-1}, all Int64 and non-nullable.
    let fields: Vec<Field> = (0..field_count)
        .map(|i| Field::new(format!("f{i}"), DataType::Int64, false))
        .collect();
    Arc::new(Schema::new(fields))
}

fn build_projection_schema(indices: &[usize]) -> Arc<Schema> {
    let fields: Vec<Field> = indices
        .iter()
        .map(|i| Field::new(format!("f{i}"), DataType::Int64, false))
        .collect();
    Arc::new(Schema::new(fields))
}

fn build_wide_json(rows: usize, fields: usize) -> Vec<u8> {
    // Builds newline-delimited JSON objects with "wide" schema.
    // Example (rows=2, fields=3):
    // {"f0":0,"f1":1,"f2":2}
    // {"f0":1,"f1":2,"f2":3}
    let mut out = String::with_capacity(rows * fields * 12);
    for row in 0..rows {
        out.push('{');
        for field in 0..fields {
            if field > 0 {
                out.push(',');
            }
            let value = row as i64 + field as i64;
            write!(&mut out, "\"f{field}\":{value}").unwrap();
        }
        out.push('}');
        out.push('\n');
    }
    out.into_bytes()
}

fn build_wide_values(rows: usize, fields: usize) -> Vec<Value> {
    // Mirrors build_wide_json but returns structured serde_json::Value objects.
    let mut out = Vec::with_capacity(rows);
    for row in 0..rows {
        let mut map = Map::with_capacity(fields);
        for field in 0..fields {
            let key = format!("f{field}");
            let value = Number::from((row + field) as i64);
            map.insert(key, Value::Number(value));
        }
        out.push(Value::Object(map));
    }
    out
}

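// Decode newline-delimited JSON text into a WIDE_FIELDS-column Int64 schema via `Decoder::decode`.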
fn bench_decode_wide_object(c: &mut Criterion) {
    let data = build_wide_json(ROWS, WIDE_FIELDS);
    let schema = build_schema(WIDE_FIELDS);

    c.bench_function("decode_wide_object_i64_json", |b| {
        b.iter(|| {
            let mut decoder = ReaderBuilder::new(schema.clone())
                .with_batch_size(BATCH_SIZE)
                .build_decoder()
                .unwrap();
            decode_and_flush(&mut decoder, &data);
        })
    });
}

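// Same wide-object workload, but fed as pre-built serde_json::Value objects through
// `Decoder::serialize`, so no JSON text parsing is involved.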
fn bench_serialize_wide_object(c: &mut Criterion) {
    let values = build_wide_values(ROWS, WIDE_FIELDS);
    let schema = build_schema(WIDE_FIELDS);

    c.bench_function("decode_wide_object_i64_serialize", |b| {
        b.iter(|| {
            let mut decoder = ReaderBuilder::new(schema.clone())
                .with_batch_size(BATCH_SIZE)
                .build_decoder()
                .unwrap();

            decoder.serialize(&values).unwrap();
            while let Some(_batch) = decoder.flush().unwrap() {}
        })
    });
}

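// Shared helper: decode `data` (one JSON value per line) directly into the
// single column described by `field`.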
fn bench_decode_binary(c: &mut Criterion, name: &str, data: &[u8], field: Arc<Field>) {
    c.bench_function(name, |b| {
        b.iter(|| {
            let mut decoder = ReaderBuilder::new_with_field(field.clone())
                .with_batch_size(BATCH_SIZE)
                .build_decoder()
                .unwrap();
            decode_and_flush(&mut decoder, data);
        })
    });
}

#[inline]
fn append_hex_byte(buf: &mut String, byte: u8) {
    const HEX: &[u8; 16] = b"0123456789abcdef";
    buf.push(HEX[(byte >> 4) as usize] as char);
    buf.push(HEX[(byte & 0x0f) as usize] as char);
}

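// Build newline-delimited JSON string literals, each containing `bytes_per_row`
// hex-encoded bytes.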
fn build_hex_lines(rows: usize, bytes_per_row: usize) -> Vec<u8> {
    let mut data = String::with_capacity(rows * (bytes_per_row * 2 + 3));
    for row in 0..rows {
        data.push('"');
        for i in 0..bytes_per_row {
            let byte = ((row + i) & 0xff) as u8;
            append_hex_byte(&mut data, byte);
        }
        data.push('"');
        data.push('\n');
    }
    data.into_bytes()
}

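// Decode the same hex-encoded input into Binary, FixedSizeBinary and BinaryView columns.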
fn bench_binary_hex(c: &mut Criterion) {
    let binary_data = build_hex_lines(ROWS, BINARY_BYTES);

    let binary_field = Arc::new(Field::new("item", DataType::Binary, false));
    bench_decode_binary(c, "decode_binary_hex_json", &binary_data, binary_field);

    let fixed_field = Arc::new(Field::new(
        "item",
        DataType::FixedSizeBinary(BINARY_BYTES as i32),
        false,
    ));
    bench_decode_binary(c, "decode_fixed_binary_hex_json", &binary_data, fixed_field);

    let view_field = Arc::new(Field::new("item", DataType::BinaryView, false));
    bench_decode_binary(c, "decode_binary_view_hex_json", &binary_data, view_field);
}

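// Shared helper: a throughput-reporting benchmark group that decodes `data`
// against the given `schema`.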
fn bench_decode_schema(c: &mut Criterion, name: &str, data: &[u8], schema: Arc<Schema>) {
    let mut group = c.benchmark_group(name);
    group.throughput(Throughput::Bytes(data.len() as u64));
    group.sample_size(50);
    group.measurement_time(std::time::Duration::from_secs(5));
    group.warm_up_time(std::time::Duration::from_secs(2));
    group.sampling_mode(SamplingMode::Flat);
    group.bench_function(BenchmarkId::from_parameter(ROWS), |b| {
        b.iter(|| {
            let mut decoder = ReaderBuilder::new(schema.clone())
                .with_batch_size(BATCH_SIZE)
                .build_decoder()
                .unwrap();
            decode_and_flush(&mut decoder, data);
        })
    });
    group.finish();
}

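// Build rows with `total_fields` fields so that a narrow projected schema must
// still skip past the unused ones.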
fn build_wide_projection_json(rows: usize, total_fields: usize) -> Vec<u8> {
    // Estimate: each field ~15 bytes ("fXX":VVVVVVV,), total ~15*100 + overhead
    let per_row_size = total_fields * 15 + 10;
    let mut data = String::with_capacity(rows * per_row_size);

    for _row in 0..rows {
        data.push('{');
        for i in 0..total_fields {
            if i > 0 {
                data.push(',');
            }
            // Use fixed-width values for stable benchmarks: 7 digits
            let _ = write!(data, "\"f{}\":{:07}", i, i);
        }
        data.push('}');
        data.push('\n');
    }
    data.into_bytes()
}

fn bench_wide_projection(c: &mut Criterion) {
    // Wide projection workload: tests overhead of parsing unused fields
    let wide_projection_data = build_wide_projection_json(ROWS, WIDE_PROJECTION_TOTAL_FIELDS);

    let full_schema = build_schema(WIDE_PROJECTION_TOTAL_FIELDS);
    bench_decode_schema(
        c,
        "decode_wide_projection_full_json",
        &wide_projection_data,
        full_schema,
    );

    // Projected schema: only 3 fields (f0, f10, f50) out of 100
    let projected_schema = build_projection_schema(&[0, 10, 50]);
    bench_decode_schema(
        c,
        "decode_wide_projection_narrow_json",
        &wide_projection_data,
        projected_schema,
    );
}

criterion_group!(
    benches,
    bench_decode_wide_object,
    bench_serialize_wide_object,
    bench_binary_hex,
    bench_wide_projection
);
criterion_main!(benches);
