import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.ExampleParquetWriter;
import org.apache.parquet.hadoop.example.GroupReadSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public class ParquetExample {

    public static void main(String[] args) {
        // Parquet files are schema-driven: a raw String cannot be written directly.
        // Define a one-column schema and wrap each value in a Group record.
        MessageType schema = MessageTypeParser.parseMessageType(
                "message example { required binary message (UTF8); }");
        Path file = new Path("data.parquet");

        try {
            // Write: ParquetWriter.Builder is abstract, so use the concrete
            // ExampleParquetWriter builder from parquet-hadoop's example package.
            try (ParquetWriter<Group> parquetWriter = ExampleParquetWriter.builder(file)
                    .withType(schema)
                    .withWriteMode(ParquetFileWriter.Mode.OVERWRITE)
                    .withCompressionCodec(CompressionCodecName.SNAPPY)
                    .withPageSize(ParquetProperties.DEFAULT_PAGE_SIZE)
                    .withDictionaryEncoding(true)
                    .withValidation(true)
                    .build()) {
                Group record = new SimpleGroupFactory(schema)
                        .newGroup()
                        .append("message", "Hello, Parquet!");
                parquetWriter.write(record);
            }

            // Read: pair the reader with GroupReadSupport so records come back as Groups.
            try (ParquetReader<Group> parquetReader =
                         ParquetReader.builder(new GroupReadSupport(), file).build()) {
                Group record;
                while ((record = parquetReader.read()) != null) {
                    System.out.println(record);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
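
A note on the classes used above: ExampleParquetWriter and GroupReadSupport live in the parquet-hadoop artifact's example package and are the lightest self-contained way to write and read untyped Group records, which is why this sketch uses them; production code more commonly pairs Parquet with an object-model binding such as AvroParquetWriter or ProtoParquetWriter. Because the builders here take a Hadoop Path, running the example also requires the Hadoop client libraries on the classpath, and SNAPPY compression requires the Snappy codec to be available.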

