Skip to content

Commit 6f23e59

Browse files
Project import generated by Copybara. (#42)
1 parent f0326eb commit 6f23e59

File tree

16 files changed

+109
-97
lines changed

16 files changed

+109
-97
lines changed

CHANGELOG.md

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,18 @@
11
# Release History
22

3-
## 1.0.6
3+
## 1.0.7
4+
5+
### Behavior Changes
6+
7+
8+
### New Features
9+
10+
11+
### Bug Fixes
12+
13+
- Model Development & Model Registry: Fix an error related to `pandas.io.json.json_normalize`.
14+
15+
## 1.0.6 (2023-09-01)
416

517
### New Features
618
- Model Registry: add `create_if_not_exists` parameter in constructor.

bazel/environments/conda-env-snowflake.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ dependencies:
1616
- cryptography==39.0.1
1717
- flask-cors==3.0.10
1818
- flask==2.1.3
19-
- fsspec==2022.11.0
19+
- fsspec==2023.3.0
2020
- httpx==0.23.0
2121
- inflection==0.5.1
2222
- joblib==1.1.1
@@ -37,7 +37,7 @@ dependencies:
3737
- pyyaml==6.0
3838
- requests==2.29.0
3939
- ruamel.yaml==0.17.21
40-
- s3fs==2022.11.0
40+
- s3fs==2023.3.0
4141
- scikit-learn==1.3.0
4242
- scipy==1.9.3
4343
- snowflake-connector-python==3.0.3

bazel/environments/conda-env.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ dependencies:
1919
- cryptography==39.0.1
2020
- flask-cors==3.0.10
2121
- flask==2.1.3
22-
- fsspec==2022.11.0
22+
- fsspec==2023.3.0
2323
- httpx==0.23.0
2424
- inflection==0.5.1
2525
- joblib==1.1.1
@@ -41,7 +41,7 @@ dependencies:
4141
- pyyaml==6.0
4242
- requests==2.29.0
4343
- ruamel.yaml==0.17.21
44-
- s3fs==2022.11.0
44+
- s3fs==2023.3.0
4545
- scikit-learn==1.3.0
4646
- scipy==1.9.3
4747
- snowflake-connector-python==3.0.3

ci/conda_recipe/meta.yaml

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ build:
1717
noarch: python
1818
package:
1919
name: snowflake-ml-python
20-
version: 1.0.6
20+
version: 1.0.7
2121
requirements:
2222
build:
2323
- python
@@ -27,13 +27,14 @@ requirements:
2727
- aiohttp!=4.0.0a0, !=4.0.0a1
2828
- anyio>=3.5.0,<4
2929
- cloudpickle
30-
- fsspec>=2022.11,<=2023.1
30+
- fsspec>=2022.11,<2024
3131
- numpy>=1.23,<2
3232
- packaging>=20.9,<24
3333
- pandas>=1.0.0,<2
34-
- python
34+
- python>=3.8.13, <3.11
3535
- pyyaml>=6.0,<7
3636
- requests
37+
- s3fs>=2022.11,<2024
3738
- scikit-learn>=1.2.1,<1.4
3839
- scipy>=1.9,<2
3940
- snowflake-connector-python>=3.0.3,<4

codegen/sklearn_wrapper_template.py_template

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -476,9 +476,9 @@ class {transform.original_class_name}(BaseTransformer):
476476
import pandas as pd
477477
import numpy as np
478478

479-
input_df = pd.io.json.json_normalize(ds)
479+
input_df = pd.json_normalize(ds)
480480

481-
# pd.io.json.json_normalize() doesn't remove quotes around quoted identifiers like snowpark_df.to_pandas().
481+
# pd.json_normalize() doesn't remove quotes around quoted identifiers like snowpark_df.to_pandas().
482482
# But trained models have unquoted input column names saved in internal state if trained using snowpark_df
483483
# or quoted input column names saved in internal state if trained using pandas_df.
484484
# Model expects exact same column names in the input df for predict call.

requirements.yml

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -101,8 +101,8 @@
101101
dev_version: "2.1.3"
102102
- name_pypi: fsspec[http]
103103
name_conda: fsspec
104-
dev_version: "2022.11.0"
105-
version_requirements: ">=2022.11,<=2023.1"
104+
dev_version: "2023.3.0"
105+
version_requirements: ">=2022.11,<2024"
106106
- name: httpx
107107
dev_version: "0.23.0"
108108
- name: inflection
@@ -158,7 +158,7 @@
158158
dev_version: "7.1.2"
159159
- name_conda: python
160160
dev_version_conda: "3.8.13"
161-
version_requirements_conda: ""
161+
version_requirements_conda: ">=3.8.13, <3.11"
162162
- name_pypi: torch
163163
name_conda: pytorch
164164
dev_version: "2.0.1"
@@ -175,7 +175,8 @@
175175
- name: ruamel.yaml
176176
dev_version: "0.17.21"
177177
- name: s3fs
178-
dev_version: "2022.11.0"
178+
dev_version: "2023.3.0"
179+
version_requirements: ">=2022.11,<2024"
179180
- name: scikit-learn
180181
dev_version: "1.3.0"
181182
version_requirements: ">=1.2.1,<1.4"

snowflake/ml/fileset/stage_fs_test.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,7 @@
22
from typing import Dict, List
33

44
import boto3
5-
6-
# library `requests` has known stubs but is not installed.
7-
# TODO(zpeng): we may need to install as many mypy stubs as possible. However that
8-
# would require installing mypy when initializing the bazel conda environment.
9-
import requests # type: ignore
5+
import requests
106
import stage_fs
117
from absl.testing import absltest
128
from moto import server

snowflake/ml/model/_deploy_client/warehouse/infer_template.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ def __exit__(self, type, value, traceback):
5252
# TODO(halu): Avoid per batch async detection branching.
5353
@vectorized(input=pd.DataFrame, max_batch_size=10)
5454
def infer(df):
55-
input_df = pd.io.json.json_normalize(df[0]).astype(dtype=dtype_map)
55+
input_df = pd.json_normalize(df[0]).astype(dtype=dtype_map)
5656
if inspect.iscoroutinefunction(model.{target_method}):
5757
predictions_df = anyio.run(model.{target_method}, input_df[input_cols])
5858
else:

snowflake/ml/modeling/model_selection/_internal/_grid_search_cv.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -493,7 +493,6 @@ def _fit_snowpark(self, dataset: DataFrame) -> None:
493493
]
494494
target_locations = []
495495
for param_chunk in param_chunks:
496-
497496
param_chunk_dist: Any = defaultdict(set)
498497
for d in param_chunk:
499498
for k, v in d.items():
@@ -675,9 +674,9 @@ def vec_batch_infer(ds: PandasSeries[dict]) -> PandasSeries[dict]: # type: igno
675674
import numpy as np
676675
import pandas as pd
677676

678-
input_df = pd.io.json.json_normalize(ds)
677+
input_df = pd.json_normalize(ds)
679678

680-
# pd.io.json.json_normalize() doesn't remove quotes around quoted identifiers like snowpark_df.to_pandas().
679+
# pd.json_normalize() doesn't remove quotes around quoted identifiers like snowpark_df.to_pandas().
681680
# But trained models have unquoted input column names saved in internal state if trained using snowpark_df
682681
# or quoted input column names saved in internal state if trained using pandas_df.
683682
# Model expects exact same column names in the input df for predict call.

snowflake/ml/modeling/model_selection/_internal/_randomized_search_cv.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -503,7 +503,6 @@ def _fit_snowpark(self, dataset: DataFrame) -> None:
503503
]
504504
target_locations = []
505505
for param_chunk in param_chunks:
506-
507506
param_chunk_dist: Any = defaultdict(set)
508507
for d in param_chunk:
509508
for k, v in d.items():
@@ -684,9 +683,9 @@ def vec_batch_infer(ds: PandasSeries[dict]) -> PandasSeries[dict]: # type: igno
684683
import numpy as np
685684
import pandas as pd
686685

687-
input_df = pd.io.json.json_normalize(ds)
686+
input_df = pd.json_normalize(ds)
688687

689-
# pd.io.json.json_normalize() doesn't remove quotes around quoted identifiers like snowpark_df.to_pandas().
688+
# pd.json_normalize() doesn't remove quotes around quoted identifiers like snowpark_df.to_pandas().
690689
# But trained models have unquoted input column names saved in internal state if trained using snowpark_df
691690
# or quoted input column names saved in internal state if trained using pandas_df.
692691
# Model expects exact same column names in the input df for predict call.

0 commit comments

Comments
 (0)