// Tests for writing schema_pb record types to parquet files and reading them back.
package schema

import (
	"fmt"
	"io"
	"os"
	"testing"

	"github.com/parquet-go/parquet-go"
	"github.com/parquet-go/parquet-go/compress/zstd"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)
func TestWriteParquet(t *testing.T) {
// create a schema_pb.RecordType
recordType := NewRecordTypeBuilder().
AddLongField("ID").
AddLongField("CreatedAt").
AddRecordField("Person", NewRecordTypeBuilder().
2024-04-22 15:42:18 +08:00
AddStringField("zName").
AddListField("emails", TypeString)).
AddStringField("Company").Build()
2024-04-19 13:41:20 +08:00
fmt.Printf("RecordType: %v\n", recordType)
// create a parquet schema
parquetSchema, err := ToParquetSchema("example", recordType)
if err != nil {
t.Fatalf("ToParquetSchema failed: %v", err)
}
fmt.Printf("ParquetSchema: %v\n", parquetSchema)
2024-04-22 15:42:18 +08:00
parquet.PrintSchema(os.Stdout, "example", parquetSchema)
2024-04-19 13:41:20 +08:00
2024-04-22 15:42:18 +08:00
filename := "example.parquet"
testWritingParquetFile(t, filename, parquetSchema, recordType)
total := testReadingParquetFile(t, filename, parquetSchema, recordType)
if total != 128*1024 {
t.Fatalf("total != 128*1024: %v", total)
}
}
// testWritingParquetFile writes 128 synthetic rows matching recordType to
// filename using the given parquet schema, compressed with zstd.
func testWritingParquetFile(t *testing.T, filename string, parquetSchema *parquet.Schema, recordType *schema_pb.RecordType) {
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0664)
	if err != nil {
		t.Fatalf("os.OpenFile failed: %v", err)
	}
	defer file.Close()

	writer := parquet.NewWriter(file, parquetSchema, parquet.Compression(&zstd.Codec{Level: zstd.SpeedDefault}))
	rowBuilder := parquet.NewRowBuilder(parquetSchema)
	for i := 0; i < 128; i++ {
		rowBuilder.Reset()
		// Populate one synthetic record; field values are deterministic
		// functions of i so the content is reproducible.
		AddRecordValue(rowBuilder, recordType, NewRecordValueBuilder().
			AddLongValue("ID", int64(1+i)).
			AddLongValue("CreatedAt", 2*int64(i)).
			AddRecordValue("Person", NewRecordValueBuilder().
				AddStringValue("zName", fmt.Sprintf("john_%d", i)).
				AddStringListValue("emails",
					fmt.Sprintf("john_%d@y.com", i),
					fmt.Sprintf("john_%d@g.com", i),
					fmt.Sprintf("john_%d@t.com", i))).
			AddStringValue("Company", fmt.Sprintf("company_%d", i)).Build())
		if _, err := writer.WriteRows([]parquet.Row{rowBuilder.Row()}); err != nil {
			t.Fatalf("writer.WriteRows failed: %v", err)
		}
	}
	// Close flushes buffered row groups and writes the file footer.
	if err := writer.Close(); err != nil {
		t.Fatalf("writer.Close failed: %v", err)
	}
}
func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parquet.Schema, recordType *schema_pb.RecordType) (total int) {
// read the parquet file
file, err := os.Open(filename)
if err != nil {
t.Fatalf("os.Open failed: %v", err)
}
defer file.Close()
reader := parquet.NewReader(file, parquetSchema)
rows := make([]parquet.Row, 128)
for {
rowCount, err := reader.ReadRows(rows)
if err != nil {
if err == io.EOF {
break
}
t.Fatalf("reader.Read failed: %v", err)
}
for i := 0; i < rowCount; i++ {
row := rows[i]
// convert parquet row to schema_pb.RecordValue
recordValue, err := ToRecordValue(recordType, row)
if err != nil {
t.Fatalf("ToRecordValue failed: %v", err)
}
fmt.Printf("RecordValue: %v\n", recordValue)
}
total += rowCount
}
fmt.Printf("total: %v\n", total)
return
2024-04-19 13:41:20 +08:00
}