suppress warning message of pandas_on_spark to_spark (#1058)

Author: Li Jiang
Date: 2023-06-02 00:04:01 +08:00 (committed by GitHub)
Parent: b975df81da
Commit: d36b2afe7f
2 changed files with 16 additions and 0 deletions

File 1 of 2

@@ -54,6 +54,10 @@ def spark_metric_loss_score(
     Returns:
         float | the loss score. A lower value indicates a better model.
     """
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     label_col = "label"
     prediction_col = "prediction"
     kwargs = {}
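
The same two added lines are repeated in every touched function. A minimal sketch of what they do, assuming pyspark is installed; the DataFrame below is hypothetical and only stands in for the data FLAML passes through to_spark:

    import warnings

    import pyspark.pandas as ps

    # Hypothetical pandas-on-Spark frame; a default SparkSession is created on demand.
    psdf = ps.DataFrame({"label": [0.0, 1.0], "prediction": [0.1, 0.9]})

    # Without a filter, pandas-on-Spark operations such as to_spark() can emit
    # advisory warnings (e.g. about index handling). The commit discards them
    # process-wide before converting:
    warnings.filterwarnings("ignore")
    sdf = psdf.to_spark(index_col="tmp_index_col")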

File 2 of 2

@@ -92,6 +92,10 @@ def train_test_split_pyspark(
         pyspark.sql.DataFrame/pandas_on_spark DataFrame | The train dataframe.
         pyspark.sql.DataFrame/pandas_on_spark DataFrame | The test dataframe.
     """
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     if isinstance(df, psDataFrame):
         df = df.to_spark(index_col=index_col)
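
To see what the blanket filter hides, the warnings can be recorded instead of printed. A sketch, again with a hypothetical frame; the exact message text and category depend on the PySpark version:

    import warnings

    import pyspark.pandas as ps

    psdf = ps.DataFrame({"x": [1, 2, 3]})  # hypothetical frame

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")  # capture everything raised inside this block
        psdf.to_spark()                  # no index_col: pandas-on-Spark typically advises here
    for w in caught:
        print(w.category.__name__, w.message)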
@@ -156,6 +160,10 @@ def iloc_pandas_on_spark(
     index_col: Optional[str] = "tmp_index_col",
 ) -> Union[psDataFrame, psSeries]:
     """Get the rows of a pandas_on_spark dataframe/series by index."""
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     if isinstance(psdf, (DataFrame, Series)):
         return psdf.iloc[index]
     if isinstance(index, (int, slice)):
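
warnings.filterwarnings("ignore") with no further arguments suppresses every warning raised later in the process, not only the pandas-on-Spark advice. The standard-library call also accepts category and message filters; a narrower sketch (PandasAPIOnSparkAdviceWarning is assumed to be available, as in recent PySpark releases, hence the fallback):

    import warnings

    try:
        # Assumed import: present in recent PySpark releases.
        from pyspark.errors import PandasAPIOnSparkAdviceWarning

        warnings.filterwarnings("ignore", category=PandasAPIOnSparkAdviceWarning)
    except ImportError:
        # Fall back to matching on the message text instead of the category.
        warnings.filterwarnings("ignore", message=".*index_col.*")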
@@ -207,6 +215,10 @@ def spark_kFold(
     Returns:
         A list of (train, validation) DataFrames.
     """
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     if isinstance(dataset, psDataFrame):
         dataset = dataset.to_spark(index_col=index_col)
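
Because filterwarnings mutates global state, calling any of these helpers leaves warnings disabled for the rest of the process. A scoped alternative is sketched below purely to illustrate the trade-off; to_spark_quietly is a hypothetical helper, not part of this change:

    import warnings


    def to_spark_quietly(psdf, index_col="tmp_index_col"):
        """Convert a pandas-on-Spark frame to Spark, silencing warnings only for the call.

        The previous warning filters are restored automatically when the block exits.
        """
        with warnings.catch_warnings():      # snapshot the current filter state
            warnings.simplefilter("ignore")  # ignore warnings inside this block only
            return psdf.to_spark(index_col=index_col)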