@@ -963,7 +963,7 @@ def to_df(
     def to_parquet(
         self,
         path: PathLike[str] | str,
-        price_type: Literal["fixed", "float"] = "float",
+        price_type: PriceType | str = PriceType.FLOAT,
         pretty_ts: bool = True,
         map_symbols: bool = True,
         schema: Schema | str | None = None,
@@ -992,6 +992,9 @@ def to_parquet(
             This is only required when reading a DBN stream with mixed record types.
         mode : str, default "w"
             The file write mode to use, either "x" or "w".
+        **kwargs : Any
+            Keyword arguments to pass to the `pyarrow.parquet.ParquetWriter`.
+            These can be used to override the default behavior of the writer.

         Raises
         ------
@@ -1000,10 +1003,12 @@ def to_parquet(
             If the DBN schema is unspecified and cannot be determined.

         """
-        if price_type == "decimal":
+        file_path = validate_file_write_path(path, "path", exist_ok=mode == "w")
+        price_type = validate_enum(price_type, PriceType, "price_type")
+
+        if price_type == PriceType.DECIMAL:
             raise ValueError("the 'decimal' price type is not currently supported")

-        file_path = validate_file_write_path(path, "path", exist_ok=mode == "w")
         schema = validate_maybe_enum(schema, Schema, "schema")
         if schema is None:
             if self.schema is None:
@@ -1025,8 +1030,8 @@ def to_parquet(
                 # Initialize the writer using the first DataFrame
                 parquet_schema = pa.Schema.from_pandas(frame)
                 writer = pq.ParquetWriter(
-                    where=file_path,
-                    schema=parquet_schema,
+                    where=kwargs.pop("where", file_path),
+                    schema=kwargs.pop("schema", parquet_schema),
                     **kwargs,
                 )
             writer.write_table(
0 commit comments