mirror of
https://github.com/microsoft/FLAML.git
synced 2026-02-16 05:32:24 +08:00
Compare commits
627 Commits
v2.3.2
...
unit-tests
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f3dee40d08 | ||
|
|
1d301b413c | ||
|
|
7083042210 | ||
|
|
4ac0347e5a | ||
|
|
579f795f33 | ||
|
|
897eae6616 | ||
|
|
6f573938bf | ||
|
|
cc903f7807 | ||
|
|
e88a464fab | ||
|
|
7ddd7f0a58 | ||
|
|
9ecb642685 | ||
|
|
ce6ff2e0b5 | ||
|
|
b0886939ab | ||
|
|
ca0eb7b75b | ||
|
|
06111054a4 | ||
|
|
3b69d7a69c | ||
|
|
ec25d5bce7 | ||
|
|
3eb01a5778 | ||
|
|
41016d6087 | ||
|
|
7fea33db97 | ||
|
|
02f8ca32de | ||
|
|
2c0f95df98 | ||
|
|
61742144cb | ||
|
|
0b4d76f509 | ||
|
|
a2ba14114c | ||
|
|
c014969616 | ||
|
|
f0e2438cc1 | ||
|
|
bf53bd4a07 | ||
|
|
e0f24f0ba3 | ||
|
|
056ac5e4c6 | ||
|
|
85dedc331f | ||
|
|
2ba1c2da01 | ||
|
|
5efcccc953 | ||
|
|
309f4a8a4e | ||
|
|
e6380356df | ||
|
|
bef0c16f22 | ||
|
|
fd7b9a07ab | ||
|
|
41257f04bd | ||
|
|
16a1406b7c | ||
|
|
fd5cdbe59a | ||
|
|
15527dc220 | ||
|
|
9ddd6a9df5 | ||
|
|
76f6a13d2a | ||
|
|
53baaded6d | ||
|
|
a883ec704b | ||
|
|
5c7a880deb | ||
|
|
12575f0e0d | ||
|
|
0743babb84 | ||
|
|
d1d446374c | ||
|
|
ac12bbae9c | ||
|
|
d58f6395c6 | ||
|
|
4054d34e0e | ||
|
|
bf51f91ff5 | ||
|
|
ef07860f3c | ||
|
|
eb3890fd4b | ||
|
|
6010067763 | ||
|
|
db0c266dc1 | ||
|
|
283c2d1a7b | ||
|
|
3f35fb2470 | ||
|
|
5cd3a99b5f | ||
|
|
5ec8e36f77 | ||
|
|
a46f9d5d78 | ||
|
|
ae86fe076e | ||
|
|
bed2448078 | ||
|
|
233e214b61 | ||
|
|
172a547268 | ||
|
|
47af42c6a4 | ||
|
|
bfce7ae400 | ||
|
|
ff1eb9ef38 | ||
|
|
73029aa906 | ||
|
|
d06b36af28 | ||
|
|
b5cd00ad10 | ||
|
|
a195f342fa | ||
|
|
86212cd8ed | ||
|
|
6f0059e257 | ||
|
|
3e7d48623b | ||
|
|
722e6dc83e | ||
|
|
0f6b1d032d | ||
|
|
ba930078fe | ||
|
|
f12614e759 | ||
|
|
e96d9dc5c6 | ||
|
|
4368cf4527 | ||
|
|
b3096123ae | ||
|
|
d214bbecf8 | ||
|
|
394152fc2f | ||
|
|
19e033e15d | ||
|
|
80a6e7a7ec | ||
|
|
65b380c1a1 | ||
|
|
398860d998 | ||
|
|
8b1f4c631c | ||
|
|
4a3c6700dc | ||
|
|
c3a2edce67 | ||
|
|
5f50f10cb2 | ||
|
|
ca799e8ca2 | ||
|
|
961a286216 | ||
|
|
5e52a7a682 | ||
|
|
8f68be42f8 | ||
|
|
de61437024 | ||
|
|
60232bfb73 | ||
|
|
5432439b7f | ||
|
|
4491ee01c2 | ||
|
|
381d5681c2 | ||
|
|
34009fc497 | ||
|
|
29b5f75c6a | ||
|
|
0924c7854a | ||
|
|
b4dd35a25a | ||
|
|
9f4940fc35 | ||
|
|
c0f933e2eb | ||
|
|
a5ec782122 | ||
|
|
abd19c037b | ||
|
|
5ee41879d6 | ||
|
|
0141e1d661 | ||
|
|
75d962ad0d | ||
|
|
155a4cef92 | ||
|
|
f7ed953755 | ||
|
|
2b5c5f8de0 | ||
|
|
47bc5b2923 | ||
|
|
3b27af285f | ||
|
|
344b22a7dd | ||
|
|
6d22922dc3 | ||
|
|
ff0c8b8073 | ||
|
|
6234d4608b | ||
|
|
ef4ecfb881 | ||
|
|
b7b007f8e5 | ||
|
|
f4d0b5d48f | ||
|
|
e257232561 | ||
|
|
de8da18494 | ||
|
|
d0b2df514c | ||
|
|
8af15bb80f | ||
|
|
d15b697efa | ||
|
|
c6715e8c4c | ||
|
|
1693d15456 | ||
|
|
0e8ed63b01 | ||
|
|
7256fddd37 | ||
|
|
a7b4323d03 | ||
|
|
ad31a4682e | ||
|
|
8f956c596b | ||
|
|
de8d34c5f3 | ||
|
|
93c64a9cda | ||
|
|
92a94d2073 | ||
|
|
28fae32e80 | ||
|
|
9084585a9c | ||
|
|
16cadb12b1 | ||
|
|
a989e60a85 | ||
|
|
dd7b107fea | ||
|
|
84d8ffb6c1 | ||
|
|
c63f882780 | ||
|
|
453c7c714c | ||
|
|
8e56e1f9a2 | ||
|
|
6864897fc6 | ||
|
|
4c9a570fc9 | ||
|
|
6d928c22fa | ||
|
|
d952bc18e7 | ||
|
|
926624eb17 | ||
|
|
0bb9d32967 | ||
|
|
3e6aa17191 | ||
|
|
4ebd16b210 | ||
|
|
90eff402de | ||
|
|
b862c18fd7 | ||
|
|
46c1bfc1ca | ||
|
|
bae394dd0d | ||
|
|
b6484f51e6 | ||
|
|
140cc3f8fb | ||
|
|
dc57da5612 | ||
|
|
ea23be1a88 | ||
|
|
4af704e589 | ||
|
|
48c582c1c6 | ||
|
|
7328e8b50e | ||
|
|
43bbf77039 | ||
|
|
bda71a0b1d | ||
|
|
be43f4da1b | ||
|
|
b76f03cad0 | ||
|
|
1dff814f4c | ||
|
|
f2a8f0a8c7 | ||
|
|
10fad748a8 | ||
|
|
8d529fdf22 | ||
|
|
1a391c9ed9 | ||
|
|
f9565384f8 | ||
|
|
aff36c6693 | ||
|
|
4423cd9054 | ||
|
|
c4d441373f | ||
|
|
5243fd2b62 | ||
|
|
55babfe19a | ||
|
|
89b5aa8a45 | ||
|
|
ef914fee26 | ||
|
|
037efe7d33 | ||
|
|
86227fe08f | ||
|
|
1641712fde | ||
|
|
1f0d3ce2bc | ||
|
|
c26aeb7171 | ||
|
|
0d5af3bdd1 | ||
|
|
519b12db3b | ||
|
|
c54660fe3c | ||
|
|
ba6671aa94 | ||
|
|
e0cb926767 | ||
|
|
5fd6d9e179 | ||
|
|
f827161ac5 | ||
|
|
63d96ca081 | ||
|
|
7ecd6c54aa | ||
|
|
b59486e842 | ||
|
|
1e78ccfcd9 | ||
|
|
d711d7857f | ||
|
|
d65683a2dc | ||
|
|
4a1621549a | ||
|
|
e831146e11 | ||
|
|
ced77366f2 | ||
|
|
a2c1301360 | ||
|
|
1bbc1ea3db | ||
|
|
2b29bccbab | ||
|
|
db4262934e | ||
|
|
248b217b2e | ||
|
|
8ac10c1947 | ||
|
|
b39cad6e78 | ||
|
|
fc9e29c1ed | ||
|
|
1a39bfc0fd | ||
|
|
3200e542b3 | ||
|
|
9988645b02 | ||
|
|
ced1d99e55 | ||
|
|
ce1fdccb77 | ||
|
|
5155b242c5 | ||
|
|
c1f3b3b0ba | ||
|
|
30a58c79d5 | ||
|
|
70940eaf4d | ||
|
|
a1864f8dc0 | ||
|
|
028de3fa00 | ||
|
|
4db2bfb496 | ||
|
|
ca0723ce02 | ||
|
|
cc2180fb8a | ||
|
|
936ffa3634 | ||
|
|
6c6b191454 | ||
|
|
4dac580b41 | ||
|
|
7b51a54291 | ||
|
|
9b8183ebec | ||
|
|
69a6f614d5 | ||
|
|
5eee4f1ea6 | ||
|
|
13783f107d | ||
|
|
1b3cee0c1f | ||
|
|
9f7b444c8f | ||
|
|
cc6ccc9e6f | ||
|
|
cc741e6599 | ||
|
|
c5bb506edf | ||
|
|
c195b28b8d | ||
|
|
a160d24729 | ||
|
|
2a323881b2 | ||
|
|
31aeef5f06 | ||
|
|
05adedbb3b | ||
|
|
e08ba40133 | ||
|
|
749ae1c738 | ||
|
|
8a94585710 | ||
|
|
1fcf89efaa | ||
|
|
c21116d27f | ||
|
|
3c7e211a03 | ||
|
|
8e45c29302 | ||
|
|
c85047784e | ||
|
|
03156f0137 | ||
|
|
8739350051 | ||
|
|
1427198b51 | ||
|
|
e50c84ce72 | ||
|
|
f9da8e07cb | ||
|
|
0850cfb0a9 | ||
|
|
da314a8733 | ||
|
|
d4c6a03305 | ||
|
|
c15ed5fed9 | ||
|
|
bfb62186be | ||
|
|
7d7de8b1d7 | ||
|
|
8bd09ccb51 | ||
|
|
4138db8c9d | ||
|
|
f6c4f6e9eb | ||
|
|
23c5b53481 | ||
|
|
873cbea4f4 | ||
|
|
42a1477022 | ||
|
|
32bec11396 | ||
|
|
b9184dc01f | ||
|
|
54c74bed73 | ||
|
|
9bc3c51ac4 | ||
|
|
fd895b3796 | ||
|
|
467665fabf | ||
|
|
f7a97581cb | ||
|
|
2e09687c8f | ||
|
|
264074b984 | ||
|
|
9cffeaf068 | ||
|
|
dba2975355 | ||
|
|
93a10e90c9 | ||
|
|
cbfc7c7d6f | ||
|
|
b7406d8b65 | ||
|
|
33984639f8 | ||
|
|
a47b9a3f44 | ||
|
|
07a63acf38 | ||
|
|
b4859d8870 | ||
|
|
55a2a30f3d | ||
|
|
278bf5b548 | ||
|
|
cc5ea0c168 | ||
|
|
b6ce840333 | ||
|
|
97b769ff62 | ||
|
|
9cf2d0af06 | ||
|
|
099273591c | ||
|
|
61f914e898 | ||
|
|
b5942269a9 | ||
|
|
1d355fe0b5 | ||
|
|
fb237ea738 | ||
|
|
858bd2323b | ||
|
|
a0774f70fc | ||
|
|
63fab6da65 | ||
|
|
99966aca73 | ||
|
|
6c28ba6c5d | ||
|
|
d28ef9d5a1 | ||
|
|
cd096a1386 | ||
|
|
7d9559aeeb | ||
|
|
c2e79b9cdc | ||
|
|
cb7fc18490 | ||
|
|
9075598d4e | ||
|
|
0763b6a1c1 | ||
|
|
56cb755e9e | ||
|
|
0bc3841b67 | ||
|
|
c6904fc1fa | ||
|
|
ad32fac79d | ||
|
|
512641abcf | ||
|
|
66561aed17 | ||
|
|
dec289c035 | ||
|
|
2248ecf6fe | ||
|
|
81b2a6526a | ||
|
|
56bcea212a | ||
|
|
d584f4dcb1 | ||
|
|
a5dc39f06e | ||
|
|
dc327b38f3 | ||
|
|
53d546b73e | ||
|
|
cbc8d0c94a | ||
|
|
1ab20b5b7c | ||
|
|
f893dba6a9 | ||
|
|
9a6e783199 | ||
|
|
a4b9392a69 | ||
|
|
b96a25bae3 | ||
|
|
dc6f191861 | ||
|
|
452ff33d88 | ||
|
|
fd5c2a9dfd | ||
|
|
7a1d7e380d | ||
|
|
53b512547e | ||
|
|
0936189dcd | ||
|
|
4b2451e7c9 | ||
|
|
3782afa347 | ||
|
|
f75beb919d | ||
|
|
f755a7a0da | ||
|
|
5c1060715f | ||
|
|
2c85192386 | ||
|
|
1e7a9dc345 | ||
|
|
69a66828a3 | ||
|
|
773345604a | ||
|
|
6cca7d2d63 | ||
|
|
dd846715ec | ||
|
|
3275867baa | ||
|
|
52c68a9445 | ||
|
|
19477683c4 | ||
|
|
8731dddc75 | ||
|
|
45787b4979 | ||
|
|
fb6c90460b | ||
|
|
442024d41f | ||
|
|
da958f329a | ||
|
|
bab9aed3aa | ||
|
|
7e1f12a4c8 | ||
|
|
02f1919006 | ||
|
|
6976ce0c8e | ||
|
|
eb679157d1 | ||
|
|
181dd2dad8 | ||
|
|
183e661079 | ||
|
|
d1868a8bdb | ||
|
|
1526f67080 | ||
|
|
1088e3c6a0 | ||
|
|
df209585bb | ||
|
|
d949298287 | ||
|
|
7d5aa555eb | ||
|
|
6faa8b479b | ||
|
|
628f33636f | ||
|
|
c9726fb7ab | ||
|
|
838cee5eb4 | ||
|
|
ab6a0ff105 | ||
|
|
2604961809 | ||
|
|
a3ec02fab0 | ||
|
|
a6d3878995 | ||
|
|
69672f624b | ||
|
|
763e4a3ae1 | ||
|
|
cef80a0212 | ||
|
|
fac55074ef | ||
|
|
6e8ede1d15 | ||
|
|
d77e98cb36 | ||
|
|
4514caffb4 | ||
|
|
f2f9c3b5b5 | ||
|
|
b314f18471 | ||
|
|
ee49ad7e57 | ||
|
|
c4755e167b | ||
|
|
7760d0f2c3 | ||
|
|
2e6a150709 | ||
|
|
c685dd1bf9 | ||
|
|
f751f09e28 | ||
|
|
13e3d4da2b | ||
|
|
b3a1262068 | ||
|
|
131aba9158 | ||
|
|
5ed43cbdf9 | ||
|
|
a41d0b1752 | ||
|
|
125d735968 | ||
|
|
551bdaf164 | ||
|
|
bc1085817e | ||
|
|
28067fdf5f | ||
|
|
8d125f7755 | ||
|
|
c7f3197d38 | ||
|
|
85bcb2ebbc | ||
|
|
259a0fa252 | ||
|
|
96349ba58c | ||
|
|
e10e4e6d24 | ||
|
|
d91a7654da | ||
|
|
bfdfefa728 | ||
|
|
f6b43f0837 | ||
|
|
05c7767006 | ||
|
|
0ca166fce3 | ||
|
|
2b6e59d93b | ||
|
|
e2948f577a | ||
|
|
15049d6b23 | ||
|
|
62831146a9 | ||
|
|
598bcdd1b3 | ||
|
|
56570f8d8c | ||
|
|
5118e66e80 | ||
|
|
094f0102c1 | ||
|
|
cf197b3766 | ||
|
|
c75bd5c203 | ||
|
|
6af1ea046e | ||
|
|
96bcc314a1 | ||
|
|
2d0d238806 | ||
|
|
6114aa8c03 | ||
|
|
f30ae2200a | ||
|
|
2d393e6db0 | ||
|
|
477e74ca3c | ||
|
|
769e7c752c | ||
|
|
0b852845b9 | ||
|
|
d47f9a2bee | ||
|
|
702880b8cf | ||
|
|
662e7be4b0 | ||
|
|
b1c98effd5 | ||
|
|
3a3e7390db | ||
|
|
07011efa39 | ||
|
|
30de97c8a7 | ||
|
|
7e1242f8dd | ||
|
|
829e565323 | ||
|
|
8a24a25190 | ||
|
|
b0e5225392 | ||
|
|
4dcac0424d | ||
|
|
43d3e2af31 | ||
|
|
023b674622 | ||
|
|
aa10362f76 | ||
|
|
4f24e1f87d | ||
|
|
5acf1fd0d4 | ||
|
|
943fddb700 | ||
|
|
43ed027d3b | ||
|
|
3c04b4b906 | ||
|
|
fc95d14d96 | ||
|
|
eac8b2b415 | ||
|
|
b73c4c62ea | ||
|
|
7ef6253a2e | ||
|
|
1d220749b7 | ||
|
|
4b14559a9b | ||
|
|
25b5d43df8 | ||
|
|
a7f84f29cf | ||
|
|
71d9a6ffe6 | ||
|
|
40b3ce4906 | ||
|
|
7aa442d7cd | ||
|
|
5d0a6c558f | ||
|
|
1784d14c93 | ||
|
|
d7773e7db3 | ||
|
|
5294f760ac | ||
|
|
d40fb36e05 | ||
|
|
7f56524013 | ||
|
|
a793f4820c | ||
|
|
9e6f030b46 | ||
|
|
fd02389fe9 | ||
|
|
e44678d2a4 | ||
|
|
cfa50479bb | ||
|
|
3e92aa724b | ||
|
|
d61a4fd134 | ||
|
|
aa7bc1ffad | ||
|
|
9db0dde352 | ||
|
|
923fd3f8f7 | ||
|
|
8f18f5b45b | ||
|
|
556f566a5a | ||
|
|
8267782825 | ||
|
|
4205fa813b | ||
|
|
6de06dc1f1 | ||
|
|
15bb158944 | ||
|
|
7a3d7ef207 | ||
|
|
f818b1cc18 | ||
|
|
9d9c6149e0 | ||
|
|
674cf6f34a | ||
|
|
85f1ae4b45 | ||
|
|
98539a0a72 | ||
|
|
5657200deb | ||
|
|
fc676b7aba | ||
|
|
217bdff0cc | ||
|
|
5252cc2896 | ||
|
|
f39a9780cf | ||
|
|
398f11227b | ||
|
|
124e274341 | ||
|
|
c4b8c337df | ||
|
|
567f4a9399 | ||
|
|
eeddf004ec | ||
|
|
546eb621af | ||
|
|
3d8d718c4b | ||
|
|
b264f04b11 | ||
|
|
d92fe045f9 | ||
|
|
e05ae5973b | ||
|
|
1e36194053 | ||
|
|
c781c68cef | ||
|
|
e8f40f4e31 | ||
|
|
a8e5aced66 | ||
|
|
27c10923ac | ||
|
|
d73fba54da | ||
|
|
9ed4797859 | ||
|
|
33d0e3f1e3 | ||
|
|
a266fe8bbf | ||
|
|
6167f6c1e7 | ||
|
|
7f5f0017ee | ||
|
|
ad706c9244 | ||
|
|
5456996015 | ||
|
|
d20ab683a8 | ||
|
|
3db891a894 | ||
|
|
a7efb9159f | ||
|
|
ad009ce72e | ||
|
|
d0388c3fe1 | ||
|
|
b86c13e1eb | ||
|
|
ba26b0b3e1 | ||
|
|
58fe076256 | ||
|
|
3ab9ce3cda | ||
|
|
cec0cc42be | ||
|
|
eb780e1328 | ||
|
|
686230587f | ||
|
|
ded551a125 | ||
|
|
c02eb147f6 | ||
|
|
2580d7d662 | ||
|
|
dbc0f6d559 | ||
|
|
28fb389887 | ||
|
|
9669552402 | ||
|
|
4f6933e5ef | ||
|
|
5a907b93ed | ||
|
|
1157c2d081 | ||
|
|
3d4e827e4f | ||
|
|
2f5bae733b | ||
|
|
8fa6578c3a | ||
|
|
7dd998ac4a | ||
|
|
42eb9bf43f | ||
|
|
c866753ec8 | ||
|
|
f1c923e2b1 | ||
|
|
8aaae8cacb | ||
|
|
d07cbabdfc | ||
|
|
8da9721368 | ||
|
|
9b940a1dca | ||
|
|
78d11138a7 | ||
|
|
8d1e7f98a6 | ||
|
|
178b99ac5b | ||
|
|
ac2e215731 | ||
|
|
b86df95d29 | ||
|
|
bb871ebbb7 | ||
|
|
b747e4857d | ||
|
|
75c7365d08 | ||
|
|
a2fc917aba | ||
|
|
d18a67ecdf | ||
|
|
c682df0d04 | ||
|
|
cd9750f67d | ||
|
|
3f560e1521 | ||
|
|
71127a1bdb | ||
|
|
9b4626a54c | ||
|
|
8cea895aeb | ||
|
|
c630929668 | ||
|
|
0eecc19018 | ||
|
|
d3f45a1f5b | ||
|
|
a9bcf465d2 | ||
|
|
7e79f6aeec | ||
|
|
db999b1f8b | ||
|
|
cb815d6eef | ||
|
|
e7277d8092 | ||
|
|
c9a6fb875c | ||
|
|
c9a225e615 | ||
|
|
295ad7f7a9 | ||
|
|
fdb00491ab | ||
|
|
83f64ce9a4 | ||
|
|
71e3504db8 | ||
|
|
b558803ece | ||
|
|
880e947216 | ||
|
|
05d433e216 | ||
|
|
41f51297c6 | ||
|
|
1285700d7a | ||
|
|
7f42bece89 | ||
|
|
e19107407b | ||
|
|
f5d6693253 | ||
|
|
d4e43c50a2 | ||
|
|
13aec414ea | ||
|
|
bb16dcde93 | ||
|
|
be81a76da9 | ||
|
|
2d16089529 | ||
|
|
01c3c83653 | ||
|
|
9b66103f7c | ||
|
|
48dfd72e64 | ||
|
|
dec92e5b02 | ||
|
|
22911ea1ef | ||
|
|
12183e5f73 | ||
|
|
c2b25310fc | ||
|
|
0f9420590d | ||
|
|
5107c506b4 | ||
|
|
9e219ef8dc | ||
|
|
6e4083743b | ||
|
|
17e95edd9e | ||
|
|
468bc62d27 | ||
|
|
437c239c11 | ||
|
|
8e753f1092 | ||
|
|
a3b57e11d4 | ||
|
|
a80dcf9925 | ||
|
|
7157af44e0 | ||
|
|
1798c4591e | ||
|
|
dd26263330 | ||
|
|
2ba5f8bed1 | ||
|
|
d0a11958a5 | ||
|
|
0ef9b00a75 | ||
|
|
840f76e5e5 | ||
|
|
d8b7d25b80 | ||
|
|
6d53929803 | ||
|
|
c038fbca07 | ||
|
|
6a99202492 | ||
|
|
42d1dcfa0e | ||
|
|
b83c8a7d3b | ||
|
|
b9194cdcf2 | ||
|
|
9a1f6b0291 | ||
|
|
07f4413aae |
21
.github/workflows/CD.yml
vendored
21
.github/workflows/CD.yml
vendored
@@ -12,26 +12,17 @@ jobs:
|
||||
deploy:
|
||||
strategy:
|
||||
matrix:
|
||||
os: ['ubuntu-latest']
|
||||
python-version: [3.8]
|
||||
os: ["ubuntu-latest"]
|
||||
python-version: ["3.10"]
|
||||
runs-on: ${{ matrix.os }}
|
||||
environment: package
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Cache conda
|
||||
uses: actions/cache@v3
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
path: ~/conda_pkgs_dir
|
||||
key: conda-${{ matrix.os }}-python-${{ matrix.python-version }}-${{ hashFiles('environment.yml') }}
|
||||
- name: Setup Miniconda
|
||||
uses: conda-incubator/setup-miniconda@v2
|
||||
with:
|
||||
auto-update-conda: true
|
||||
auto-activate-base: false
|
||||
activate-environment: hcrystalball
|
||||
python-version: ${{ matrix.python-version }}
|
||||
use-only-tar-bz2: true
|
||||
- name: Install from source
|
||||
# This is required for the pre-commit tests
|
||||
shell: pwsh
|
||||
@@ -42,7 +33,7 @@ jobs:
|
||||
- name: Build
|
||||
shell: pwsh
|
||||
run: |
|
||||
pip install twine
|
||||
pip install twine wheel setuptools
|
||||
python setup.py sdist bdist_wheel
|
||||
- name: Publish to PyPI
|
||||
env:
|
||||
|
||||
8
.github/workflows/deploy-website.yml
vendored
8
.github/workflows/deploy-website.yml
vendored
@@ -37,11 +37,11 @@ jobs:
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.10"
|
||||
- name: pydoc-markdown install
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install pydoc-markdown==4.5.0
|
||||
pip install pydoc-markdown==4.7.0
|
||||
- name: pydoc-markdown run
|
||||
run: |
|
||||
pydoc-markdown
|
||||
@@ -73,11 +73,11 @@ jobs:
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.10"
|
||||
- name: pydoc-markdown install
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install pydoc-markdown==4.5.0
|
||||
pip install pydoc-markdown==4.7.0
|
||||
- name: pydoc-markdown run
|
||||
run: |
|
||||
pydoc-markdown
|
||||
|
||||
105
.github/workflows/python-package.yml
vendored
105
.github/workflows/python-package.yml
vendored
@@ -14,10 +14,20 @@ on:
|
||||
- 'setup.py'
|
||||
pull_request:
|
||||
branches: ['main']
|
||||
paths:
|
||||
- 'flaml/**'
|
||||
- 'test/**'
|
||||
- 'notebook/**'
|
||||
- '.github/workflows/python-package.yml'
|
||||
- 'setup.py'
|
||||
merge_group:
|
||||
types: [checks_requested]
|
||||
schedule:
|
||||
# Every other day at 02:00 UTC
|
||||
- cron: '0 2 */2 * *'
|
||||
|
||||
permissions: {}
|
||||
permissions:
|
||||
contents: write
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
|
||||
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
|
||||
@@ -29,8 +39,11 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest, windows-2019]
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
os: [ubuntu-latest, macos-latest, windows-latest]
|
||||
python-version: ["3.10", "3.11"]
|
||||
exclude:
|
||||
- os: macos-latest
|
||||
python-version: "3.10"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
@@ -38,7 +51,7 @@ jobs:
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: On mac, install libomp to facilitate lgbm and xgboost install
|
||||
if: matrix.os == 'macOS-latest'
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
brew update
|
||||
brew install libomp
|
||||
@@ -50,7 +63,7 @@ jobs:
|
||||
export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib -L/usr/local/opt/libomp/lib -lomp"
|
||||
- name: Install packages and dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip wheel
|
||||
python -m pip install --upgrade pip wheel setuptools
|
||||
pip install -e .
|
||||
python -c "import flaml"
|
||||
pip install -e .[test]
|
||||
@@ -64,33 +77,43 @@ jobs:
|
||||
run: |
|
||||
pip install pyspark==3.5.1
|
||||
pip list | grep "pyspark"
|
||||
- name: If linux and python<3.11, install ray 2
|
||||
if: matrix.os == 'ubuntu-latest' && matrix.python-version != '3.11'
|
||||
- name: On Ubuntu python 3.12, install pyspark 4.0.1
|
||||
if: matrix.python-version == '3.12' && matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
pip install "ray[tune]<2.5.0"
|
||||
- name: If mac and python 3.10, install ray and xgboost 1
|
||||
if: matrix.os == 'macOS-latest' && matrix.python-version == '3.10'
|
||||
run: |
|
||||
pip install -e .[ray]
|
||||
# use macOS to test xgboost 1, but macOS also supports xgboost 2
|
||||
pip install "xgboost<2"
|
||||
- name: If linux, install prophet on python < 3.9
|
||||
if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.8'
|
||||
pip install pyspark==4.0.1
|
||||
pip list | grep "pyspark"
|
||||
# # TODO: support ray
|
||||
# - name: If linux and python<3.11, install ray 2
|
||||
# if: matrix.os == 'ubuntu-latest' && matrix.python-version < '3.11'
|
||||
# run: |
|
||||
# pip install "ray[tune]<2.5.0"
|
||||
- name: Install prophet when on linux
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
pip install -e .[forecast]
|
||||
- name: Install vw on python < 3.10
|
||||
if: matrix.python-version == '3.8' || matrix.python-version == '3.9'
|
||||
# TODO: support vw for python 3.10+
|
||||
- name: If linux and python<3.10, install vw
|
||||
if: matrix.os == 'ubuntu-latest' && matrix.python-version < '3.10'
|
||||
run: |
|
||||
pip install -e .[vw]
|
||||
- name: Pip freeze
|
||||
run: |
|
||||
pip freeze
|
||||
- name: Check dependencies
|
||||
run: |
|
||||
python test/check_dependency.py
|
||||
- name: Clear pip cache
|
||||
run: |
|
||||
pip cache purge
|
||||
- name: Test with pytest
|
||||
if: matrix.python-version != '3.10'
|
||||
run: |
|
||||
pytest test
|
||||
pytest test/ --ignore=test/autogen --reruns 2 --reruns-delay 10
|
||||
- name: Coverage
|
||||
if: matrix.python-version == '3.10'
|
||||
run: |
|
||||
pip install coverage
|
||||
coverage run -a -m pytest test
|
||||
coverage run -a -m pytest test --ignore=test/autogen --reruns 2 --reruns-delay 10
|
||||
coverage xml
|
||||
- name: Upload coverage to Codecov
|
||||
if: matrix.python-version == '3.10'
|
||||
@@ -98,28 +121,24 @@ jobs:
|
||||
with:
|
||||
file: ./coverage.xml
|
||||
flags: unittests
|
||||
- name: Save dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
git config --global user.name 'github-actions[bot]'
|
||||
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
|
||||
git config advice.addIgnoredFile false
|
||||
|
||||
# docs:
|
||||
BRANCH=unit-tests-installed-dependencies
|
||||
git fetch origin
|
||||
git checkout -B "$BRANCH"
|
||||
if git show-ref --verify --quiet "refs/remotes/origin/$BRANCH"; then
|
||||
git rebase "origin/$BRANCH"
|
||||
fi
|
||||
|
||||
# runs-on: ubuntu-latest
|
||||
|
||||
# steps:
|
||||
# - uses: actions/checkout@v3
|
||||
# - name: Setup Python
|
||||
# uses: actions/setup-python@v4
|
||||
# with:
|
||||
# python-version: '3.8'
|
||||
# - name: Compile documentation
|
||||
# run: |
|
||||
# pip install -e .
|
||||
# python -m pip install sphinx sphinx_rtd_theme
|
||||
# cd docs
|
||||
# make html
|
||||
# - name: Deploy to GitHub pages
|
||||
# if: ${{ github.ref == 'refs/heads/main' }}
|
||||
# uses: JamesIves/github-pages-deploy-action@3.6.2
|
||||
# with:
|
||||
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
# BRANCH: gh-pages
|
||||
# FOLDER: docs/_build/html
|
||||
# CLEAN: true
|
||||
pip freeze > installed_all_dependencies_${{ matrix.python-version }}_${{ matrix.os }}.txt
|
||||
python test/check_dependency.py > installed_first_tier_dependencies_${{ matrix.python-version }}_${{ matrix.os }}.txt
|
||||
git add installed_*dependencies*.txt
|
||||
mv coverage.xml ./coverage_${{ matrix.python-version }}_${{ matrix.os }}.xml || true
|
||||
git add -f ./coverage_${{ matrix.python-version }}_${{ matrix.os }}.xml || true
|
||||
git commit -m "Update installed dependencies for Python ${{ matrix.python-version }} on ${{ matrix.os }}" || exit 0
|
||||
git push origin "$BRANCH"
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -172,7 +172,7 @@ test/default
|
||||
test/housing.json
|
||||
test/nlp/default/transformer_ms/seq-classification.json
|
||||
|
||||
flaml/fabric/fanova/_fanova.c
|
||||
flaml/fabric/fanova/*fanova.c
|
||||
# local config files
|
||||
*.config.local
|
||||
|
||||
@@ -184,3 +184,7 @@ notebook/lightning_logs/
|
||||
lightning_logs/
|
||||
flaml/autogen/extensions/tmp/
|
||||
test/autogen/my_tmp/
|
||||
catboost_*
|
||||
|
||||
# Internal configs
|
||||
.pypirc
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# basic setup
|
||||
FROM mcr.microsoft.com/devcontainers/python:3.8
|
||||
FROM mcr.microsoft.com/devcontainers/python:3.10
|
||||
RUN apt-get update && apt-get -y update
|
||||
RUN apt-get install -y sudo git npm
|
||||
|
||||
|
||||
56
README.md
56
README.md
@@ -14,15 +14,9 @@
|
||||
<br>
|
||||
</p>
|
||||
|
||||
:fire: FLAML supports AutoML and Hyperparameter Tuning in [Microsoft Fabric Data Science](https://learn.microsoft.com/en-us/fabric/data-science/automated-machine-learning-fabric). In addition, we've introduced Python 3.11 support, along with a range of new estimators, and comprehensive integration with MLflow—thanks to contributions from the Microsoft Fabric product team.
|
||||
:fire: FLAML supports AutoML and Hyperparameter Tuning in [Microsoft Fabric Data Science](https://learn.microsoft.com/en-us/fabric/data-science/automated-machine-learning-fabric). In addition, we've introduced Python 3.11 and 3.12 support, along with a range of new estimators, and comprehensive integration with MLflow—thanks to contributions from the Microsoft Fabric product team.
|
||||
|
||||
:fire: Heads-up: We have migrated [AutoGen](https://microsoft.github.io/autogen/) into a dedicated [github repository](https://github.com/microsoft/autogen). Alongside this move, we have also launched a dedicated [Discord](https://discord.gg/pAbnFJrkgZ) server and a [website](https://microsoft.github.io/autogen/) for comprehensive documentation.
|
||||
|
||||
:fire: The automated multi-agent chat framework in [AutoGen](https://microsoft.github.io/autogen/) is in preview from v2.0.0.
|
||||
|
||||
:fire: FLAML is highlighted in OpenAI's [cookbook](https://github.com/openai/openai-cookbook#related-resources-from-around-the-web).
|
||||
|
||||
:fire: [autogen](https://microsoft.github.io/autogen/) is released with support for ChatGPT and GPT-4, based on [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673).
|
||||
:fire: Heads-up: [AutoGen](https://microsoft.github.io/autogen/) has moved to a dedicated [GitHub repository](https://github.com/microsoft/autogen). FLAML no longer includes the `autogen` module—please use AutoGen directly.
|
||||
|
||||
## What is FLAML
|
||||
|
||||
@@ -30,7 +24,7 @@ FLAML is a lightweight Python library for efficient automation of machine
|
||||
learning and AI operations. It automates workflow based on large language models, machine learning models, etc.
|
||||
and optimizes their performance.
|
||||
|
||||
- FLAML enables building next-gen GPT-X applications based on multi-agent conversations with minimal effort. It simplifies the orchestration, automation and optimization of a complex GPT-X workflow. It maximizes the performance of GPT-X models and augments their weakness.
|
||||
- FLAML enables economical automation and tuning for ML/AI workflows, including model selection and hyperparameter optimization under resource constraints.
|
||||
- For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend. Users can find their desired customizability from a smooth range.
|
||||
- It supports fast and economical automatic tuning (e.g., inference hyperparameters for foundation models, configurations in MLOps/LMOps workflows, pipelines, mathematical/statistical models, algorithms, computing experiments, software configurations), capable of handling large search space with heterogeneous evaluation cost and complex constraints/guidance/early stopping.
|
||||
|
||||
@@ -40,16 +34,16 @@ FLAML has a .NET implementation in [ML.NET](http://dot.net/ml), an open-source,
|
||||
|
||||
## Installation
|
||||
|
||||
FLAML requires **Python version >= 3.8**. It can be installed from pip:
|
||||
FLAML requires **Python version >= 3.9**. It can be installed from pip:
|
||||
|
||||
```bash
|
||||
pip install flaml
|
||||
```
|
||||
|
||||
Minimal dependencies are installed without extra options. You can install extra options based on the feature you need. For example, use the following to install the dependencies needed by the [`autogen`](https://microsoft.github.io/autogen/) package.
|
||||
Minimal dependencies are installed without extra options. You can install extra options based on the feature you need. For example, use the following to install the dependencies needed by the [`automl`](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML) module.
|
||||
|
||||
```bash
|
||||
pip install "flaml[autogen]"
|
||||
pip install "flaml[automl]"
|
||||
```
|
||||
|
||||
Find more options in [Installation](https://microsoft.github.io/FLAML/docs/Installation).
|
||||
@@ -57,39 +51,6 @@ Each of the [`notebook examples`](https://github.com/microsoft/FLAML/tree/main/n
|
||||
|
||||
## Quickstart
|
||||
|
||||
- (New) The [autogen](https://microsoft.github.io/autogen/) package enables the next-gen GPT-X applications with a generic multi-agent conversation framework.
|
||||
It offers customizable and conversable agents which integrate LLMs, tools and human.
|
||||
By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
|
||||
|
||||
```python
|
||||
from flaml import autogen
|
||||
|
||||
assistant = autogen.AssistantAgent("assistant")
|
||||
user_proxy = autogen.UserProxyAgent("user_proxy")
|
||||
user_proxy.initiate_chat(
|
||||
assistant,
|
||||
message="Show me the YTD gain of 10 largest technology companies as of today.",
|
||||
)
|
||||
# This initiates an automated chat between the two agents to solve the task
|
||||
```
|
||||
|
||||
Autogen also helps maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4. It offers a drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalites like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
|
||||
|
||||
```python
|
||||
# perform tuning
|
||||
config, analysis = autogen.Completion.tune(
|
||||
data=tune_data,
|
||||
metric="success",
|
||||
mode="max",
|
||||
eval_func=eval_func,
|
||||
inference_budget=0.05,
|
||||
optimization_budget=3,
|
||||
num_samples=-1,
|
||||
)
|
||||
# perform inference for a test instance
|
||||
response = autogen.Completion.create(context=test_instance, **config)
|
||||
```
|
||||
|
||||
- With three lines of code, you can start using this economical and fast
|
||||
AutoML engine as a [scikit-learn style estimator](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML).
|
||||
|
||||
@@ -111,7 +72,10 @@ automl.fit(X_train, y_train, task="classification", estimator_list=["lgbm"])
|
||||
|
||||
```python
|
||||
from flaml import tune
|
||||
tune.run(evaluation_function, config={…}, low_cost_partial_config={…}, time_budget_s=3600)
|
||||
|
||||
tune.run(
|
||||
evaluation_function, config={…}, low_cost_partial_config={…}, time_budget_s=3600
|
||||
)
|
||||
```
|
||||
|
||||
- [Zero-shot AutoML](https://microsoft.github.io/FLAML/docs/Use-Cases/Zero-Shot-AutoML) allows using the existing training API from lightgbm, xgboost etc. while getting the benefit of AutoML in choosing high-performance hyperparameter configurations per task.
|
||||
|
||||
11520
coverage_3.10_windows-latest.xml
Normal file
11520
coverage_3.10_windows-latest.xml
Normal file
File diff suppressed because it is too large
Load Diff
11527
coverage_3.11_macos-latest.xml
Normal file
11527
coverage_3.11_macos-latest.xml
Normal file
File diff suppressed because it is too large
Load Diff
11947
coverage_3.11_ubuntu-latest.xml
Normal file
11947
coverage_3.11_ubuntu-latest.xml
Normal file
File diff suppressed because it is too large
Load Diff
11999
coverage_3.11_windows-latest.xml
Normal file
11999
coverage_3.11_windows-latest.xml
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,5 @@
|
||||
import logging
|
||||
import warnings
|
||||
|
||||
try:
|
||||
from flaml.automl import AutoML, logger_formatter
|
||||
@@ -12,7 +13,8 @@ from flaml.version import __version__
|
||||
|
||||
# Set the root logger.
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.setLevel(logging.INFO)
|
||||
if logger.level == logging.NOTSET:
|
||||
logger.setLevel(logging.INFO)
|
||||
|
||||
if not has_automl:
|
||||
logger.warning("flaml.automl is not available. Please install flaml[automl] to enable AutoML functionalities.")
|
||||
warnings.warn("flaml.automl is not available. Please install flaml[automl] to enable AutoML functionalities.")
|
||||
|
||||
@@ -156,7 +156,7 @@ class MathUserProxyAgent(UserProxyAgent):
|
||||
when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
|
||||
default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
|
||||
max_invalid_q_per_step (int): (ADDED) the maximum number of invalid queries per step.
|
||||
**kwargs (dict): other kwargs in [UserProxyAgent](user_proxy_agent#__init__).
|
||||
**kwargs (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
|
||||
"""
|
||||
super().__init__(
|
||||
name=name,
|
||||
|
||||
@@ -123,7 +123,7 @@ class RetrieveUserProxyAgent(UserProxyAgent):
|
||||
can be found at `https://www.sbert.net/docs/pretrained_models.html`. The default model is a
|
||||
fast model. If you want to use a high performance model, `all-mpnet-base-v2` is recommended.
|
||||
- customized_prompt (Optional, str): the customized prompt for the retrieve chat. Default is None.
|
||||
**kwargs (dict): other kwargs in [UserProxyAgent](user_proxy_agent#__init__).
|
||||
**kwargs (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
|
||||
"""
|
||||
super().__init__(
|
||||
name=name,
|
||||
|
||||
@@ -10,6 +10,7 @@ import os
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from concurrent.futures import as_completed
|
||||
from functools import partial
|
||||
from typing import Callable, List, Optional, Union
|
||||
|
||||
@@ -187,9 +188,16 @@ class AutoML(BaseEstimator):
|
||||
mem_thres: A float of the memory size constraint in bytes.
|
||||
pred_time_limit: A float of the prediction latency constraint in seconds.
|
||||
It refers to the average prediction time per row in validation data.
|
||||
train_time_limit: A float of the training time constraint in seconds.
|
||||
train_time_limit: None or a float of the training time constraint in seconds for each trial.
|
||||
Only valid for sequential search.
|
||||
verbose: int, default=3 | Controls the verbosity, higher means more
|
||||
messages.
|
||||
verbose=0: logger level = CRITICAL
|
||||
verbose=1: logger level = ERROR
|
||||
verbose=2: logger level = WARNING
|
||||
verbose=3: logger level = INFO
|
||||
verbose=4: logger level = DEBUG
|
||||
verbose>5: logger level = NOTSET
|
||||
retrain_full: bool or str, default=True | whether to retrain the
|
||||
selected model on the full training data when using holdout.
|
||||
True - retrain only after search finishes; False - no retraining;
|
||||
@@ -203,7 +211,7 @@ class AutoML(BaseEstimator):
|
||||
* Valid str options depend on different tasks.
|
||||
For classification tasks, valid choices are
|
||||
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time', 'group'].
|
||||
"auto" -> uniform.
|
||||
For time series forecast tasks, must be "auto" or 'time'.
|
||||
For ranking task, must be "auto" or 'group'.
|
||||
@@ -393,6 +401,24 @@ class AutoML(BaseEstimator):
|
||||
self._estimator_type = "classifier" if settings["task"] in CLASSIFICATION else "regressor"
|
||||
self.best_run_id = None
|
||||
|
||||
def __getstate__(self):
    """Return a picklable copy of ``__dict__``.

    MLflow's sklearn flavor serializes estimators via (cloud)pickle. During
    AutoML fitting an internal mlflow integration instance may be attached;
    it holds ``concurrent.futures.Future`` objects and executors containing
    thread locks, which are not picklable, so that entry is excluded here.
    """
    return {key: value for key, value in self.__dict__.items() if key != "mlflow_integration"}
|
||||
|
||||
def __setstate__(self, state):
    """Restore ``__dict__`` from pickled state.

    The mlflow integration object is dropped during pickling; recreate the
    attribute as ``None`` so later attribute access does not fail.
    """
    for key, value in state.items():
        self.__dict__[key] = value
    self.mlflow_integration = None
|
||||
|
||||
def get_params(self, deep: bool = False) -> dict:
    """Return a shallow copy of the constructor settings.

    ``deep`` is accepted for scikit-learn API compatibility; the body never
    reads it.
    """
    params = self._settings.copy()
    return params
|
||||
|
||||
@@ -424,6 +450,8 @@ class AutoML(BaseEstimator):
|
||||
If `model_history` was set to True, then the returned model is trained.
|
||||
"""
|
||||
state = self._search_states.get(estimator_name)
|
||||
if state and estimator_name == self._best_estimator:
|
||||
return self.model
|
||||
return state and getattr(state, "trained_estimator", None)
|
||||
|
||||
@property
|
||||
@@ -739,7 +767,7 @@ class AutoML(BaseEstimator):
|
||||
* Valid str options depend on different tasks.
|
||||
For classification tasks, valid choices are
|
||||
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time', 'group'].
|
||||
"auto" -> uniform.
|
||||
For time series forecast tasks, must be "auto" or 'time'.
|
||||
For ranking task, must be "auto" or 'group'.
|
||||
@@ -1332,7 +1360,8 @@ class AutoML(BaseEstimator):
|
||||
mem_thres: A float of the memory size constraint in bytes.
|
||||
pred_time_limit: A float of the prediction latency constraint in seconds.
|
||||
It refers to the average prediction time per row in validation data.
|
||||
train_time_limit: None or a float of the training time constraint in seconds.
|
||||
train_time_limit: None or a float of the training time constraint in seconds for each trial.
|
||||
Only valid for sequential search.
|
||||
X_val: None or a numpy array or a pandas dataframe of validation data.
|
||||
y_val: None or a numpy array or a pandas series of validation labels.
|
||||
sample_weight_val: None or a numpy array of the sample weight of
|
||||
@@ -1345,6 +1374,12 @@ class AutoML(BaseEstimator):
|
||||
for training data.
|
||||
verbose: int, default=3 | Controls the verbosity, higher means more
|
||||
messages.
|
||||
verbose=0: logger level = CRITICAL
|
||||
verbose=1: logger level = ERROR
|
||||
verbose=2: logger level = WARNING
|
||||
verbose=3: logger level = INFO
|
||||
verbose=4: logger level = DEBUG
|
||||
verbose>5: logger level = NOTSET
|
||||
retrain_full: bool or str, default=True | whether to retrain the
|
||||
selected model on the full training data when using holdout.
|
||||
True - retrain only after search finishes; False - no retraining;
|
||||
@@ -1358,7 +1393,7 @@ class AutoML(BaseEstimator):
|
||||
* Valid str options depend on different tasks.
|
||||
For classification tasks, valid choices are
|
||||
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time', 'group'].
|
||||
"auto" -> uniform.
|
||||
For time series forecast tasks, must be "auto" or 'time'.
|
||||
For ranking task, must be "auto" or 'group'.
|
||||
@@ -1623,6 +1658,13 @@ class AutoML(BaseEstimator):
|
||||
_ch.setFormatter(logger_formatter)
|
||||
logger.addHandler(_ch)
|
||||
|
||||
if model_history:
|
||||
logger.warning(
|
||||
"With `model_history` set to `True` by default, all intermediate models are retained in memory, "
|
||||
"which may significantly increase memory usage and slow down training. "
|
||||
"Consider setting `model_history=False` to optimize memory and accelerate the training process."
|
||||
)
|
||||
|
||||
if not use_ray and not use_spark and n_concurrent_trials > 1:
|
||||
if ray_available:
|
||||
logger.warning(
|
||||
@@ -1708,7 +1750,7 @@ class AutoML(BaseEstimator):
|
||||
if not (mlflow.active_run() is not None or is_autolog_enabled()):
|
||||
self.mlflow_integration.only_history = True
|
||||
except KeyError:
|
||||
print("Not in Fabric, Skipped")
|
||||
logger.info("Not in Fabric, Skipped")
|
||||
task.validate_data(
|
||||
self,
|
||||
self._state,
|
||||
@@ -2529,6 +2571,21 @@ class AutoML(BaseEstimator):
|
||||
self._selected = state = self._search_states[estimator]
|
||||
state.best_config_sample_size = self._state.data_size[0]
|
||||
state.best_config = state.init_config[0] if state.init_config else {}
|
||||
self._track_iter = 0
|
||||
self._config_history[self._track_iter] = (estimator, state.best_config, self._state.time_from_start)
|
||||
self._best_iteration = self._track_iter
|
||||
state.val_loss = getattr(state, "val_loss", float("inf"))
|
||||
state.best_loss = getattr(state, "best_loss", float("inf"))
|
||||
state.config = getattr(state, "config", state.best_config.copy())
|
||||
state.metric_for_logging = getattr(state, "metric_for_logging", None)
|
||||
state.sample_size = getattr(state, "sample_size", self._state.data_size[0])
|
||||
state.learner_class = getattr(state, "learner_class", self._state.learner_classes.get(estimator))
|
||||
if hasattr(self, "mlflow_integration") and self.mlflow_integration:
|
||||
self.mlflow_integration.record_state(
|
||||
automl=self,
|
||||
search_state=state,
|
||||
estimator=estimator,
|
||||
)
|
||||
elif self._use_ray is False and self._use_spark is False:
|
||||
self._search_sequential()
|
||||
else:
|
||||
@@ -2700,16 +2757,47 @@ class AutoML(BaseEstimator):
|
||||
):
|
||||
if mlflow.active_run() is None:
|
||||
mlflow.start_run(run_id=self.mlflow_integration.parent_run_id)
|
||||
self.mlflow_integration.log_model(
|
||||
self._trained_estimator.model,
|
||||
self.best_estimator,
|
||||
signature=self.estimator_signature,
|
||||
)
|
||||
self.mlflow_integration.pickle_and_log_automl_artifacts(
|
||||
self, self.model, self.best_estimator, signature=self.pipeline_signature
|
||||
)
|
||||
if self.best_estimator.endswith("_spark"):
|
||||
self.mlflow_integration.log_model(
|
||||
self._trained_estimator.model,
|
||||
self.best_estimator,
|
||||
signature=self.estimator_signature,
|
||||
run_id=self.mlflow_integration.parent_run_id,
|
||||
)
|
||||
else:
|
||||
self.mlflow_integration.pickle_and_log_automl_artifacts(
|
||||
self,
|
||||
self.model,
|
||||
self.best_estimator,
|
||||
signature=self.pipeline_signature,
|
||||
run_id=self.mlflow_integration.parent_run_id,
|
||||
)
|
||||
else:
|
||||
logger.info("not retraining because the time budget is too small.")
|
||||
logger.warning("not retraining because the time budget is too small.")
|
||||
self.wait_futures()
|
||||
|
||||
def wait_futures(self):
|
||||
if self.mlflow_integration is not None:
|
||||
logger.debug("Collecting results from submitted record_state tasks")
|
||||
t1 = time.perf_counter()
|
||||
for future in as_completed(self.mlflow_integration.futures):
|
||||
_task = self.mlflow_integration.futures[future]
|
||||
try:
|
||||
result = future.result()
|
||||
logger.debug(f"Result for record_state task {_task}: {result}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Exception for record_state task {_task}: {e}")
|
||||
for future in as_completed(self.mlflow_integration.futures_log_model):
|
||||
_task = self.mlflow_integration.futures_log_model[future]
|
||||
try:
|
||||
result = future.result()
|
||||
logger.debug(f"Result for log_model task {_task}: {result}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Exception for log_model task {_task}: {e}")
|
||||
t2 = time.perf_counter()
|
||||
logger.debug(f"Collecting results from tasks submitted to executors costs {t2-t1} seconds.")
|
||||
else:
|
||||
logger.debug("No futures to wait for.")
|
||||
|
||||
def __del__(self):
|
||||
if (
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
try:
|
||||
from sklearn.ensemble import HistGradientBoostingClassifier, HistGradientBoostingRegressor
|
||||
except ImportError:
|
||||
pass
|
||||
except ImportError as e:
|
||||
print(f"scikit-learn is required for HistGradientBoostingEstimator. Please install it; error: {e}")
|
||||
|
||||
from flaml import tune
|
||||
from flaml.automl.model import SKLearnEstimator
|
||||
|
||||
@@ -2,13 +2,17 @@
|
||||
# * Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# * Licensed under the MIT License. See LICENSE file in the
|
||||
# * project root for license information.
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
import random
|
||||
import uuid
|
||||
from datetime import datetime, timedelta
|
||||
from decimal import ROUND_HALF_UP, Decimal
|
||||
from typing import TYPE_CHECKING, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from flaml.automl.spark import DataFrame, Series, pd, ps, psDataFrame, psSeries
|
||||
from flaml.automl.spark import DataFrame, F, Series, T, pd, ps, psDataFrame, psSeries
|
||||
from flaml.automl.training_log import training_log_reader
|
||||
|
||||
try:
|
||||
@@ -19,6 +23,7 @@ except ImportError:
|
||||
if TYPE_CHECKING:
|
||||
from flaml.automl.task import Task
|
||||
|
||||
|
||||
TS_TIMESTAMP_COL = "ds"
|
||||
TS_VALUE_COL = "y"
|
||||
|
||||
@@ -45,7 +50,10 @@ def load_openml_dataset(dataset_id, data_dir=None, random_state=0, dataset_forma
|
||||
"""
|
||||
import pickle
|
||||
|
||||
import openml
|
||||
try:
|
||||
import openml
|
||||
except ImportError:
|
||||
openml = None
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
filename = "openml_ds" + str(dataset_id) + ".pkl"
|
||||
@@ -56,15 +64,15 @@ def load_openml_dataset(dataset_id, data_dir=None, random_state=0, dataset_forma
|
||||
dataset = pickle.load(f)
|
||||
else:
|
||||
print("download dataset from openml")
|
||||
dataset = openml.datasets.get_dataset(dataset_id)
|
||||
dataset = openml.datasets.get_dataset(dataset_id) if openml else None
|
||||
if not os.path.exists(data_dir):
|
||||
os.makedirs(data_dir)
|
||||
with open(filepath, "wb") as f:
|
||||
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
|
||||
print("Dataset name:", dataset.name)
|
||||
print("Dataset name:", dataset.name) if dataset else None
|
||||
try:
|
||||
X, y, *__ = dataset.get_data(target=dataset.default_target_attribute, dataset_format=dataset_format)
|
||||
except ValueError:
|
||||
except (ValueError, AttributeError, TypeError):
|
||||
from sklearn.datasets import fetch_openml
|
||||
|
||||
X, y = fetch_openml(data_id=dataset_id, return_X_y=True)
|
||||
@@ -445,3 +453,331 @@ class DataTransformer:
|
||||
def group_counts(groups):
    """Return the size of each group in ``groups``, ordered by first appearance."""
    _, first_pos, sizes = np.unique(groups, return_index=True, return_counts=True)
    order = np.argsort(first_pos)
    return sizes[order]
|
||||
|
||||
|
||||
def get_random_dataframe(n_rows: int = 200, ratio_none: float = 0.1, seed: int = 42) -> DataFrame:
|
||||
"""Generate a random pandas DataFrame with various data types for testing.
|
||||
This function creates a DataFrame with multiple column types including:
|
||||
- Timestamps
|
||||
- Integers
|
||||
- Floats
|
||||
- Categorical values
|
||||
- Booleans
|
||||
- Lists (tags)
|
||||
- Decimal strings
|
||||
- UUIDs
|
||||
- Binary data (as hex strings)
|
||||
- JSON blobs
|
||||
- Nullable text fields
|
||||
Parameters
|
||||
----------
|
||||
n_rows : int, default=200
|
||||
Number of rows in the generated DataFrame
|
||||
ratio_none : float, default=0.1
|
||||
Probability of generating None values in applicable columns
|
||||
seed : int, default=42
|
||||
Random seed for reproducibility
|
||||
Returns
|
||||
-------
|
||||
pd.DataFrame
|
||||
A DataFrame with 14 columns of various data types
|
||||
Examples
|
||||
--------
|
||||
>>> df = get_random_dataframe(100, 0.05, 123)
|
||||
>>> df.shape
|
||||
(100, 14)
|
||||
>>> df.dtypes
|
||||
timestamp datetime64[ns]
|
||||
id int64
|
||||
score float64
|
||||
status object
|
||||
flag object
|
||||
count object
|
||||
value object
|
||||
tags object
|
||||
rating object
|
||||
uuid object
|
||||
binary object
|
||||
json_blob object
|
||||
category category
|
||||
nullable_text object
|
||||
dtype: object
|
||||
"""
|
||||
|
||||
np.random.seed(seed)
|
||||
random.seed(seed)
|
||||
|
||||
def random_tags():
|
||||
tags = ["AI", "ML", "data", "robotics", "vision"]
|
||||
return random.sample(tags, k=random.randint(1, 3)) if random.random() > ratio_none else None
|
||||
|
||||
def random_decimal():
|
||||
return (
|
||||
str(Decimal(random.uniform(1, 5)).quantize(Decimal("0.01"), rounding=ROUND_HALF_UP))
|
||||
if random.random() > ratio_none
|
||||
else None
|
||||
)
|
||||
|
||||
def random_json_blob():
|
||||
blob = {"a": random.randint(1, 10), "b": random.random()}
|
||||
return json.dumps(blob) if random.random() > ratio_none else None
|
||||
|
||||
def random_binary():
|
||||
return bytes(random.randint(0, 255) for _ in range(4)).hex() if random.random() > ratio_none else None
|
||||
|
||||
data = {
|
||||
"timestamp": [
|
||||
datetime(2020, 1, 1) + timedelta(days=np.random.randint(0, 1000)) if np.random.rand() > ratio_none else None
|
||||
for _ in range(n_rows)
|
||||
],
|
||||
"id": range(1, n_rows + 1),
|
||||
"score": np.random.uniform(0, 100, n_rows),
|
||||
"status": np.random.choice(
|
||||
["active", "inactive", "pending", None],
|
||||
size=n_rows,
|
||||
p=[(1 - ratio_none) / 3, (1 - ratio_none) / 3, (1 - ratio_none) / 3, ratio_none],
|
||||
),
|
||||
"flag": np.random.choice(
|
||||
[True, False, None], size=n_rows, p=[(1 - ratio_none) / 2, (1 - ratio_none) / 2, ratio_none]
|
||||
),
|
||||
"count": [np.random.randint(0, 100) if np.random.rand() > ratio_none else None for _ in range(n_rows)],
|
||||
"value": [round(np.random.normal(50, 15), 2) if np.random.rand() > ratio_none else None for _ in range(n_rows)],
|
||||
"tags": [random_tags() for _ in range(n_rows)],
|
||||
"rating": [random_decimal() for _ in range(n_rows)],
|
||||
"uuid": [str(uuid.uuid4()) if np.random.rand() > ratio_none else None for _ in range(n_rows)],
|
||||
"binary": [random_binary() for _ in range(n_rows)],
|
||||
"json_blob": [random_json_blob() for _ in range(n_rows)],
|
||||
"category": pd.Categorical(
|
||||
np.random.choice(
|
||||
["A", "B", "C", None],
|
||||
size=n_rows,
|
||||
p=[(1 - ratio_none) / 3, (1 - ratio_none) / 3, (1 - ratio_none) / 3, ratio_none],
|
||||
)
|
||||
),
|
||||
"nullable_text": [random.choice(["Good", "Bad", "Average", None]) for _ in range(n_rows)],
|
||||
}
|
||||
|
||||
return pd.DataFrame(data)
|
||||
|
||||
|
||||
def auto_convert_dtypes_spark(
    df: psDataFrame,
    na_values: list = None,
    category_threshold: float = 0.3,
    convert_threshold: float = 0.6,
    sample_ratio: float = 0.1,
) -> tuple[psDataFrame, dict]:
    """Automatically convert data types in a PySpark DataFrame using heuristics.

    A sample of each string column is inspected to decide whether the column
    should be parsed as numeric or timestamp, or treated as category-like; the
    chosen conversions are then applied to the full DataFrame.

    Args:
        df: A PySpark DataFrame to convert.
        na_values: List of strings to be considered as NA/NaN. Defaults to
            ['NA', 'na', 'NULL', 'null', ''].
        category_threshold: Maximum ratio of unique values to total values
            to consider a column categorical. Defaults to 0.3.
        convert_threshold: Minimum ratio of successfully converted values required
            to apply a type conversion. Defaults to 0.6.
        sample_ratio: Fraction of data to sample for type inference. Defaults to 0.1.

    Returns:
        tuple: (The DataFrame with converted types, A dictionary mapping column
        names to their inferred types as strings)

    Note:
        - 'category' in the schema dict is conceptual as PySpark doesn't have a
          true category type like pandas.
        - The function uses sampling for efficiency with large datasets.
    """
    n_rows = df.count()
    if na_values is None:
        na_values = ["NA", "na", "NULL", "null", ""]

    # An empty DataFrame has nothing to sample from (and would otherwise cause
    # a division by zero when sizing the sample); report existing dtypes as-is.
    if n_rows == 0:
        return df, dict(df.dtypes)

    # Normalize NA-like string values to real nulls so they don't skew inference.
    lowered_na = [v.lower() for v in na_values]
    for colname, coltype in df.dtypes:
        if coltype == "string":
            df = df.withColumn(
                colname,
                F.when(F.trim(F.lower(F.col(colname))).isin(lowered_na), None).otherwise(F.col(colname)),
            )

    schema = {}
    # Loop-invariant: target a sample of at least ~100 rows when available.
    sample_fraction = min(1.0, sample_ratio if n_rows * sample_ratio > 100 else 100 / n_rows)
    for colname in df.columns:
        col_sample = df.select(colname).sample(withReplacement=False, fraction=sample_fraction).dropna()
        sample_count = col_sample.count()

        # Non-string columns already carry a usable type; keep it.
        if col_sample.dtypes[0][1] != "string":
            schema[colname] = col_sample.dtypes[0][1]
            continue

        if sample_count == 0:
            schema[colname] = "string"
            continue

        inferred_type = "string"  # Default when no heuristic matches.

        # Numeric: do enough sampled values cast cleanly to double?
        if (
            col_sample.withColumn("n", F.col(colname).cast("double")).filter("n is not null").count()
            >= sample_count * convert_threshold
        ):
            # int vs double: any value with a non-negligible fractional part?
            all_whole = (
                col_sample.withColumn("n", F.col(colname).cast("double"))
                .filter("n is not null")
                .withColumn("frac", F.abs(F.col("n") % 1))
                .filter("frac > 0.000001")
                .count()
                == 0
            )
            inferred_type = "int" if all_whole else "double"

        # Low cardinality -> category-like (kept as string in Spark).
        elif col_sample.select(F.countDistinct(F.col(colname))).collect()[0][0] / sample_count <= category_threshold:
            inferred_type = "category"

        # Timestamp: do enough sampled values parse as timestamps?
        # (Built here, not eagerly, so the plan is only constructed when needed.)
        elif (
            col_sample.withColumn("parsed", F.to_timestamp(F.col(colname)))
            .filter(F.col("parsed").isNotNull())
            .count()
            >= sample_count * convert_threshold
        ):
            inferred_type = "timestamp"

        schema[colname] = inferred_type

    # Apply the inferred schema to the full DataFrame.
    for colname, inferred_type in schema.items():
        if inferred_type == "int":
            df = df.withColumn(colname, F.col(colname).cast(T.IntegerType()))
        elif inferred_type == "double":
            df = df.withColumn(colname, F.col(colname).cast(T.DoubleType()))
        elif inferred_type == "boolean":
            # NOTE(review): the inference above never emits "boolean" at
            # present; this branch only fires if a caller injects it.
            df = df.withColumn(
                colname,
                F.when(F.lower(F.col(colname)).isin("true", "yes", "1"), True)
                .when(F.lower(F.col(colname)).isin("false", "no", "0"), False)
                .otherwise(None),
            )
        elif inferred_type == "timestamp":
            df = df.withColumn(colname, F.to_timestamp(F.col(colname)))
        elif inferred_type == "category":
            df = df.withColumn(colname, F.col(colname).cast(T.StringType()))  # Marked conceptually
        # Otherwise keep as string (or original type).

    return df, schema
|
||||
|
||||
|
||||
def auto_convert_dtypes_pandas(
    df: DataFrame,
    na_values: list = None,
    category_threshold: float = 0.3,
    convert_threshold: float = 0.6,
    sample_ratio: float = 1.0,
) -> tuple[DataFrame, dict]:
    """Automatically convert data types in a pandas DataFrame using heuristics.

    Types are inferred per column (numeric, timestamp, timedelta, category,
    string) and the conversions are applied to the full column. When
    ``sample_ratio < 1.0``, only inference runs on the sample; the returned
    frame always has the same rows as ``df``. (Previously the sampled frame
    itself was converted and assigned into the full-length result, which
    misaligned the unsampled rows.)

    Args:
        df: A pandas DataFrame to convert.
        na_values: List of strings to be considered as NA/NaN. Defaults to
            ['NA', 'na', 'NULL', 'null', ''].
        category_threshold: Maximum ratio of unique values to total values
            to consider a column categorical. Defaults to 0.3.
        convert_threshold: Minimum ratio of successfully converted values required
            to apply a type conversion. Defaults to 0.6.
        sample_ratio: Fraction of data to sample for type inference. Defaults to 1.0.

    Returns:
        tuple: (The DataFrame with converted types, A dictionary mapping column
        names to their inferred types as strings)
    """
    if na_values is None:
        na_values = {"NA", "na", "NULL", "null", ""}

    def _clean(series):
        # Map NA-like strings to NaN so they don't block numeric/datetime parsing.
        return series.map(lambda x: np.nan if isinstance(x, str) and x.strip() in na_values else x)

    df_converted = df.convert_dtypes()
    schema = {}

    # Inference may run on a sample; conversion always targets the full column.
    sample_df = df.sample(frac=sample_ratio) if sample_ratio < 1.0 else df
    n_rows = len(sample_df)

    for col in df.columns:
        full_cleaned = _clean(df[col])
        series_cleaned = full_cleaned if sample_df is df else _clean(sample_df[col])

        # Skip conversion if already a non-object dtype, except nullable
        # bool/string which can potentially be categorical.
        if (
            not isinstance(series_cleaned.dtype, pd.BooleanDtype)
            and not isinstance(series_cleaned.dtype, pd.StringDtype)
            and series_cleaned.dtype != "object"
        ):
            # Keep the original data type for non-object dtypes.
            df_converted[col] = df[col]
            schema[col] = str(series_cleaned.dtype)
            continue

        if not isinstance(series_cleaned.dtype, pd.BooleanDtype):
            # Try numeric (int or float).
            numeric_sample = pd.to_numeric(series_cleaned, errors="coerce")
            if numeric_sample.notna().sum() >= n_rows * convert_threshold:
                numeric_full = numeric_sample if sample_df is df else pd.to_numeric(full_cleaned, errors="coerce")
                if (numeric_sample.dropna() % 1 == 0).all():
                    try:
                        df_converted[col] = numeric_full.astype("int")
                        schema[col] = "int"
                        continue
                    except Exception:
                        # e.g. NaNs present: fall through to float.
                        pass
                df_converted[col] = numeric_full.astype("double")
                schema[col] = "double"
                continue

            # Try datetime.
            dt_sample = pd.to_datetime(series_cleaned, errors="coerce")
            if dt_sample.notna().sum() >= n_rows * convert_threshold:
                df_converted[col] = dt_sample if sample_df is df else pd.to_datetime(full_cleaned, errors="coerce")
                schema[col] = "timestamp"
                continue

            # Try timedelta.
            try:
                td_sample = pd.to_timedelta(series_cleaned, errors="coerce")
                if td_sample.notna().sum() >= n_rows * convert_threshold:
                    df_converted[col] = td_sample if sample_df is df else pd.to_timedelta(full_cleaned, errors="coerce")
                    schema[col] = "timedelta"
                    continue
            except TypeError:
                pass

        # Try category (also reached by nullable-bool columns).
        try:
            unique_ratio = series_cleaned.nunique(dropna=True) / n_rows if n_rows > 0 else 1.0
            if unique_ratio <= category_threshold:
                df_converted[col] = full_cleaned.astype("category")
                schema[col] = "category"
                continue
        except Exception:
            pass

        df_converted[col] = full_cleaned.astype("string")
        schema[col] = "string"

    return df_converted, schema
|
||||
|
||||
@@ -1,7 +1,37 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
|
||||
class ColoredFormatter(logging.Formatter):
    """Log formatter that wraps WARNING-and-above messages in ANSI colors."""

    # ANSI escape codes per level. DEBUG/INFO are deliberately left uncolored.
    COLORS = {
        logging.WARNING: "\033[33m",  # Yellow
        logging.ERROR: "\033[31m",  # Red
        logging.CRITICAL: "\033[1;31m",  # Bright Red
    }
    RESET = "\033[0m"  # Reset to default

    def __init__(self, fmt, datefmt, use_color=True):
        """Create a formatter; ``use_color=False`` disables all coloring."""
        super().__init__(fmt, datefmt)
        self.use_color = use_color

    def format(self, record):
        """Format the record, adding a color prefix/reset when applicable."""
        text = super().format(record)
        if not self.use_color:
            return text
        prefix = self.COLORS.get(record.levelno, "")
        return f"{prefix}{text}{self.RESET}" if prefix else text
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger_formatter = logging.Formatter(
|
||||
"[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s", "%m-%d %H:%M:%S"
|
||||
use_color = True
|
||||
if os.getenv("FLAML_LOG_NO_COLOR"):
|
||||
use_color = False
|
||||
|
||||
logger_formatter = ColoredFormatter(
|
||||
"[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s", "%m-%d %H:%M:%S", use_color
|
||||
)
|
||||
logger.propagate = False
|
||||
|
||||
@@ -127,9 +127,21 @@ def metric_loss_score(
|
||||
import datasets
|
||||
|
||||
datasets_metric_name = huggingface_submetric_to_metric.get(metric_name, metric_name.split(":")[0])
|
||||
metric = datasets.load_metric(datasets_metric_name, trust_remote_code=True)
|
||||
metric_mode = huggingface_metric_to_mode[datasets_metric_name]
|
||||
|
||||
# datasets>=3 removed load_metric; prefer evaluate if available
|
||||
try:
|
||||
import evaluate
|
||||
|
||||
metric = evaluate.load(datasets_metric_name, trust_remote_code=True)
|
||||
except Exception:
|
||||
if hasattr(datasets, "load_metric"):
|
||||
metric = datasets.load_metric(datasets_metric_name, trust_remote_code=True)
|
||||
else:
|
||||
from datasets import load_metric as _load_metric # older datasets
|
||||
|
||||
metric = _load_metric(datasets_metric_name, trust_remote_code=True)
|
||||
|
||||
if metric_name.startswith("seqeval"):
|
||||
y_processed_true = [[labels[tr] for tr in each_list] for each_list in y_processed_true]
|
||||
elif metric in ("pearsonr", "spearmanr"):
|
||||
|
||||
@@ -9,6 +9,7 @@ import os
|
||||
import shutil
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
@@ -89,24 +90,28 @@ def limit_resource(memory_limit, time_limit):
|
||||
except ValueError:
|
||||
# According to https://bugs.python.org/issue40518, it's a mac-specific error.
|
||||
pass
|
||||
main_thread = False
|
||||
if time_limit is not None:
|
||||
alarm_set = False
|
||||
if time_limit is not None and threading.current_thread() is threading.main_thread():
|
||||
try:
|
||||
signal.signal(signal.SIGALRM, TimeoutHandler)
|
||||
signal.alarm(int(time_limit) or 1)
|
||||
main_thread = True
|
||||
alarm_set = True
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
if main_thread:
|
||||
if alarm_set:
|
||||
signal.alarm(0)
|
||||
if memory_limit > 0:
|
||||
resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
|
||||
try:
|
||||
resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
class BaseEstimator:
|
||||
class BaseEstimator(sklearn.base.ClassifierMixin, sklearn.base.BaseEstimator):
|
||||
"""The abstract class for all learners.
|
||||
|
||||
Typical examples:
|
||||
@@ -130,7 +135,7 @@ class BaseEstimator:
|
||||
self._task = task if isinstance(task, Task) else task_factory(task, None, None)
|
||||
self.params = self.config2params(config)
|
||||
self.estimator_class = self._model = None
|
||||
if "_estimator_type" in config:
|
||||
if "_estimator_type" in self.params:
|
||||
self._estimator_type = self.params.pop("_estimator_type")
|
||||
else:
|
||||
self._estimator_type = "classifier" if self._task.is_classification() else "regressor"
|
||||
@@ -1691,7 +1696,7 @@ class XGBoostEstimator(SKLearnEstimator):
|
||||
# use_label_encoder is deprecated in 1.7.
|
||||
if xgboost_version < "1.7.0":
|
||||
params["use_label_encoder"] = params.get("use_label_encoder", False)
|
||||
if "n_jobs" in config:
|
||||
if "n_jobs" in params:
|
||||
params["nthread"] = params.pop("n_jobs")
|
||||
return params
|
||||
|
||||
@@ -1891,7 +1896,7 @@ class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
|
||||
params = super().config2params(config)
|
||||
if "max_leaves" in params:
|
||||
params["max_leaf_nodes"] = params.get("max_leaf_nodes", params.pop("max_leaves"))
|
||||
if not self._task.is_classification() and "criterion" in config:
|
||||
if not self._task.is_classification() and "criterion" in params:
|
||||
params.pop("criterion")
|
||||
if "random_state" not in params:
|
||||
params["random_state"] = 12032022
|
||||
@@ -2066,8 +2071,8 @@ class CatBoostEstimator(BaseEstimator):
|
||||
self.estimator_class = CatBoostRegressor
|
||||
|
||||
def fit(self, X_train, y_train, budget=None, free_mem_ratio=0, **kwargs):
|
||||
if "is_retrain" in kwargs:
|
||||
kwargs.pop("is_retrain")
|
||||
kwargs.pop("is_retrain", None)
|
||||
kwargs.pop("groups", None)
|
||||
start_time = time.time()
|
||||
deadline = start_time + budget if budget else np.inf
|
||||
train_dir = f"catboost_{str(start_time)}"
|
||||
@@ -2344,7 +2349,7 @@ class SGDEstimator(SKLearnEstimator):
|
||||
params["loss"] = params.get("loss", None)
|
||||
if params["loss"] is None and self._task.is_classification():
|
||||
params["loss"] = "log_loss" if SKLEARN_VERSION >= "1.1" else "log"
|
||||
if not self._task.is_classification():
|
||||
if not self._task.is_classification() and "n_jobs" in params:
|
||||
params.pop("n_jobs")
|
||||
|
||||
if params.get("penalty") != "elasticnet":
|
||||
@@ -2815,7 +2820,7 @@ class suppress_stdout_stderr:
|
||||
# Open a pair of null files
|
||||
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
|
||||
# Save the actual stdout (1) and stderr (2) file descriptors.
|
||||
self.save_fds = (os.dup(1), os.dup(2))
|
||||
self.save_fds = [os.dup(1), os.dup(2)]
|
||||
|
||||
def __enter__(self):
|
||||
# Assign the null pointers to stdout and stderr.
|
||||
@@ -2827,5 +2832,5 @@ class suppress_stdout_stderr:
|
||||
os.dup2(self.save_fds[0], 1)
|
||||
os.dup2(self.save_fds[1], 2)
|
||||
# Close the null files
|
||||
os.close(self.null_fds[0])
|
||||
os.close(self.null_fds[1])
|
||||
for fd in self.null_fds + self.save_fds:
|
||||
os.close(fd)
|
||||
|
||||
@@ -77,6 +77,14 @@ class TrainingArgumentsForAuto(TrainingArguments):
|
||||
|
||||
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
|
||||
|
||||
# Newer versions of HuggingFace Transformers may access `TrainingArguments.generation_config`
|
||||
# (e.g., in generation-aware trainers/callbacks). Keep this attribute to remain compatible
|
||||
# while defaulting to None for non-generation tasks.
|
||||
generation_config: Optional[object] = field(
|
||||
default=None,
|
||||
metadata={"help": "Optional generation config (or path) used by generation-aware trainers."},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def load_args_from_console():
|
||||
from dataclasses import fields
|
||||
|
||||
@@ -442,8 +442,8 @@ class GenericTask(Task):
|
||||
X_train_all, y_train_all = shuffle(X_train_all, y_train_all, random_state=RANDOM_SEED)
|
||||
if data_is_df:
|
||||
X_train_all.reset_index(drop=True, inplace=True)
|
||||
if isinstance(y_train_all, pd.Series):
|
||||
y_train_all.reset_index(drop=True, inplace=True)
|
||||
if isinstance(y_train_all, pd.Series):
|
||||
y_train_all.reset_index(drop=True, inplace=True)
|
||||
|
||||
X_train, y_train = X_train_all, y_train_all
|
||||
state.groups_all = state.groups
|
||||
@@ -746,7 +746,10 @@ class GenericTask(Task):
|
||||
elif isinstance(kf, TimeSeriesSplit):
|
||||
kf = kf.split(X_train_split, y_train_split)
|
||||
else:
|
||||
kf = kf.split(X_train_split)
|
||||
try:
|
||||
kf = kf.split(X_train_split)
|
||||
except TypeError:
|
||||
kf = kf.split(X_train_split, y_train_split)
|
||||
|
||||
for train_index, val_index in kf:
|
||||
if shuffle:
|
||||
@@ -769,10 +772,10 @@ class GenericTask(Task):
|
||||
if not is_spark_dataframe:
|
||||
y_train, y_val = y_train_split[train_index], y_train_split[val_index]
|
||||
if weight is not None:
|
||||
fit_kwargs["sample_weight"], weight_val = (
|
||||
weight[train_index],
|
||||
weight[val_index],
|
||||
fit_kwargs["sample_weight"] = (
|
||||
weight[train_index] if isinstance(weight, np.ndarray) else weight.iloc[train_index]
|
||||
)
|
||||
weight_val = weight[val_index] if isinstance(weight, np.ndarray) else weight.iloc[val_index]
|
||||
if groups is not None:
|
||||
fit_kwargs["groups"] = (
|
||||
groups[train_index] if isinstance(groups, np.ndarray) else groups.iloc[train_index]
|
||||
|
||||
@@ -192,7 +192,7 @@ class Task(ABC):
|
||||
* Valid str options depend on different tasks.
|
||||
For classification tasks, valid choices are
|
||||
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
|
||||
For regression tasks, valid choices are ["auto", 'uniform', 'time', 'group'].
|
||||
"auto" -> uniform.
|
||||
For time series forecast tasks, must be "auto" or 'time'.
|
||||
For ranking task, must be "auto" or 'group'.
|
||||
|
||||
@@ -529,7 +529,7 @@ def remove_ts_duplicates(
|
||||
duplicates = X.duplicated()
|
||||
|
||||
if any(duplicates):
|
||||
logger.warning("Duplicate timestamp values found in timestamp column. " f"\n{X.loc[duplicates, X][time_col]}")
|
||||
logger.warning("Duplicate timestamp values found in timestamp column. " f"\n{X.loc[duplicates, time_col]}")
|
||||
X = X.drop_duplicates()
|
||||
logger.warning("Removed duplicate rows based on all columns")
|
||||
assert (
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import inspect
|
||||
import time
|
||||
|
||||
try:
|
||||
@@ -106,12 +107,17 @@ class TemporalFusionTransformerEstimator(TimeSeriesEstimator):
|
||||
def fit(self, X_train, y_train, budget=None, **kwargs):
|
||||
import warnings
|
||||
|
||||
import pytorch_lightning as pl
|
||||
try:
|
||||
import lightning.pytorch as pl
|
||||
from lightning.pytorch.callbacks import EarlyStopping, LearningRateMonitor
|
||||
from lightning.pytorch.loggers import TensorBoardLogger
|
||||
except ImportError:
|
||||
import pytorch_lightning as pl
|
||||
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
|
||||
from pytorch_lightning.loggers import TensorBoardLogger
|
||||
import torch
|
||||
from pytorch_forecasting import TemporalFusionTransformer
|
||||
from pytorch_forecasting.metrics import QuantileLoss
|
||||
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
|
||||
from pytorch_lightning.loggers import TensorBoardLogger
|
||||
|
||||
# a bit of monkey patching to fix the MacOS test
|
||||
# all the log_prediction method appears to do is plot stuff, which ?breaks github tests
|
||||
@@ -132,12 +138,26 @@ class TemporalFusionTransformerEstimator(TimeSeriesEstimator):
|
||||
lr_logger = LearningRateMonitor() # log the learning rate
|
||||
logger = TensorBoardLogger(kwargs.get("log_dir", "lightning_logs")) # logging results to a tensorboard
|
||||
default_trainer_kwargs = dict(
|
||||
gpus=self._kwargs.get("gpu_per_trial", [0]) if torch.cuda.is_available() else None,
|
||||
max_epochs=max_epochs,
|
||||
gradient_clip_val=gradient_clip_val,
|
||||
callbacks=[lr_logger, early_stop_callback],
|
||||
logger=logger,
|
||||
)
|
||||
|
||||
# PyTorch Lightning >=2.0 replaced `gpus` with `accelerator`/`devices`.
|
||||
# Also, passing `gpus=None` is not accepted on newer versions.
|
||||
trainer_sig_params = inspect.signature(pl.Trainer.__init__).parameters
|
||||
if torch.cuda.is_available() and "gpus" in trainer_sig_params:
|
||||
gpus = self._kwargs.get("gpu_per_trial", None)
|
||||
if gpus is not None:
|
||||
default_trainer_kwargs["gpus"] = gpus
|
||||
elif torch.cuda.is_available() and "devices" in trainer_sig_params:
|
||||
devices = self._kwargs.get("gpu_per_trial", None)
|
||||
if devices == -1:
|
||||
devices = "auto"
|
||||
if devices is not None:
|
||||
default_trainer_kwargs["accelerator"] = "gpu"
|
||||
default_trainer_kwargs["devices"] = devices
|
||||
trainer = pl.Trainer(
|
||||
**default_trainer_kwargs,
|
||||
)
|
||||
@@ -157,7 +177,14 @@ class TemporalFusionTransformerEstimator(TimeSeriesEstimator):
|
||||
val_dataloaders=val_dataloader,
|
||||
)
|
||||
best_model_path = trainer.checkpoint_callback.best_model_path
|
||||
best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
|
||||
# PyTorch 2.6 changed `torch.load` default `weights_only` from False -> True.
|
||||
# Some Lightning checkpoints (including those produced here) can require full unpickling.
|
||||
# This path is generated locally during training, so it's trusted.
|
||||
load_sig_params = inspect.signature(TemporalFusionTransformer.load_from_checkpoint).parameters
|
||||
if "weights_only" in load_sig_params:
|
||||
best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path, weights_only=False)
|
||||
else:
|
||||
best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
|
||||
train_time = time.time() - current_time
|
||||
self._model = best_tft
|
||||
return train_time
|
||||
|
||||
@@ -9,6 +9,7 @@ import numpy as np
|
||||
try:
|
||||
import pandas as pd
|
||||
from pandas import DataFrame, Series, to_datetime
|
||||
from pandas.api.types import is_datetime64_any_dtype
|
||||
from scipy.sparse import issparse
|
||||
from sklearn.compose import ColumnTransformer
|
||||
from sklearn.impute import SimpleImputer
|
||||
@@ -392,6 +393,15 @@ class DataTransformerTS:
|
||||
assert len(self.num_columns) == 0, "Trying to call fit() twice, something is wrong"
|
||||
|
||||
for column in X.columns:
|
||||
# Never treat the time column as a feature for sklearn preprocessing
|
||||
if column == self.time_col:
|
||||
continue
|
||||
|
||||
# Robust datetime detection (covers datetime64[ms/us/ns], tz-aware, etc.)
|
||||
if is_datetime64_any_dtype(X[column]):
|
||||
self.datetime_columns.append(column)
|
||||
continue
|
||||
|
||||
# sklearn/utils/validation.py needs int/float values
|
||||
if X[column].dtype.name in ("object", "category", "string"):
|
||||
if (
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
import atexit
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pickle
|
||||
import random
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import warnings
|
||||
from concurrent.futures import ThreadPoolExecutor, wait
|
||||
from typing import MutableMapping
|
||||
|
||||
import mlflow
|
||||
@@ -12,14 +16,15 @@ import pandas as pd
|
||||
from mlflow.entities import Metric, Param, RunTag
|
||||
from mlflow.exceptions import MlflowException
|
||||
from mlflow.utils.autologging_utils import AUTOLOGGING_INTEGRATIONS, autologging_is_disabled
|
||||
from packaging.requirements import Requirement
|
||||
from scipy.sparse import issparse
|
||||
from sklearn import tree
|
||||
|
||||
try:
|
||||
from pyspark.ml import Pipeline as SparkPipeline
|
||||
from pyspark.ml import PipelineModel as SparkPipelineModel
|
||||
except ImportError:
|
||||
|
||||
class SparkPipeline:
|
||||
class SparkPipelineModel:
|
||||
pass
|
||||
|
||||
|
||||
@@ -32,6 +37,84 @@ from flaml.version import __version__
|
||||
|
||||
SEARCH_MAX_RESULTS = 5000 # Each train should not have more than 5000 trials
|
||||
IS_RENAME_CHILD_RUN = os.environ.get("FLAML_IS_RENAME_CHILD_RUN", "false").lower() == "true"
|
||||
REMOVE_REQUIREMENT_LIST = [
|
||||
"synapseml-cognitive",
|
||||
"synapseml-core",
|
||||
"synapseml-deep-learning",
|
||||
"synapseml-internal",
|
||||
"synapseml-mlflow",
|
||||
"synapseml-opencv",
|
||||
"synapseml-vw",
|
||||
"synapseml-lightgbm",
|
||||
"synapseml-utils",
|
||||
"nni",
|
||||
"optuna",
|
||||
]
|
||||
OPTIONAL_REMOVE_REQUIREMENT_LIST = ["pytorch-lightning", "transformers"]
|
||||
|
||||
os.environ["MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR"] = os.environ.get("MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR", "false")
|
||||
|
||||
MLFLOW_NUM_WORKERS = int(os.environ.get("FLAML_MLFLOW_NUM_WORKERS", os.cpu_count() * 4 if os.cpu_count() else 2))
|
||||
executor = ThreadPoolExecutor(max_workers=MLFLOW_NUM_WORKERS)
|
||||
atexit.register(lambda: executor.shutdown(wait=True))
|
||||
|
||||
IS_CLEAN_LOGS = os.environ.get("FLAML_IS_CLEAN_LOGS", "1")
|
||||
if IS_CLEAN_LOGS == "1":
|
||||
logging.getLogger("synapse.ml").setLevel(logging.CRITICAL)
|
||||
logging.getLogger("mlflow.utils").setLevel(logging.CRITICAL)
|
||||
logging.getLogger("mlflow.utils.environment").setLevel(logging.CRITICAL)
|
||||
logging.getLogger("mlflow.models.model").setLevel(logging.CRITICAL)
|
||||
warnings.simplefilter("ignore", category=FutureWarning)
|
||||
warnings.simplefilter("ignore", category=UserWarning)
|
||||
|
||||
|
||||
def convert_requirement(requirement_list: list[str]):
|
||||
ret = (
|
||||
[Requirement(s.strip().lower()) for s in requirement_list]
|
||||
if mlflow.__version__ <= "2.17.0"
|
||||
else requirement_list
|
||||
)
|
||||
return ret
|
||||
|
||||
|
||||
def time_it(func_or_code=None):
|
||||
"""
|
||||
Decorator or function that measures execution time.
|
||||
|
||||
Can be used in three ways:
|
||||
1. As a decorator with no arguments: @time_it
|
||||
2. As a decorator with arguments: @time_it()
|
||||
3. As a function call with a string of code to execute and time: time_it("some_code()")
|
||||
|
||||
Args:
|
||||
func_or_code (callable or str, optional): Either a function to decorate or
|
||||
a string of code to execute and time.
|
||||
|
||||
Returns:
|
||||
callable or None: Returns a decorated function if used as a decorator,
|
||||
or None if used to execute a string of code.
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
start_time = time.time()
|
||||
result = func(*args, **kwargs)
|
||||
end_time = time.time()
|
||||
logger.debug(f"Execution of {func.__name__} took {end_time - start_time:.4f} seconds")
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
|
||||
if callable(func_or_code):
|
||||
return decorator(func_or_code)
|
||||
elif func_or_code is None:
|
||||
return decorator
|
||||
else:
|
||||
start_time = time.time()
|
||||
exec(func_or_code)
|
||||
end_time = time.time()
|
||||
logger.debug(f"Execution\n```\n{func_or_code}\n```\ntook {end_time - start_time:.4f} seconds")
|
||||
|
||||
|
||||
def flatten_dict(d: MutableMapping, sep: str = ".") -> MutableMapping:
|
||||
@@ -49,23 +132,28 @@ def is_autolog_enabled():
|
||||
return not all(autologging_is_disabled(k) for k in AUTOLOGGING_INTEGRATIONS.keys())
|
||||
|
||||
|
||||
def get_mlflow_log_latency(model_history=False):
|
||||
def get_mlflow_log_latency(model_history=False, delete_run=True):
|
||||
try:
|
||||
FLAML_MLFLOW_LOG_LATENCY = float(os.getenv("FLAML_MLFLOW_LOG_LATENCY", 0))
|
||||
except ValueError:
|
||||
FLAML_MLFLOW_LOG_LATENCY = 0
|
||||
if FLAML_MLFLOW_LOG_LATENCY >= 0.1:
|
||||
return FLAML_MLFLOW_LOG_LATENCY
|
||||
st = time.time()
|
||||
with mlflow.start_run(nested=True, run_name="get_mlflow_log_latency") as run:
|
||||
if model_history:
|
||||
sk_model = tree.DecisionTreeClassifier()
|
||||
mlflow.sklearn.log_model(sk_model, "sk_models")
|
||||
mlflow.sklearn.log_model(Pipeline([("estimator", sk_model)]), "sk_pipeline")
|
||||
mlflow.sklearn.log_model(sk_model, "model")
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
pickle_fpath = os.path.join(tmpdir, f"tmp_{int(time.time()*1000)}")
|
||||
pickle_fpath = os.path.join(tmpdir, f"tmp_{int(time.time() * 1000)}")
|
||||
with open(pickle_fpath, "wb") as f:
|
||||
pickle.dump(sk_model, f)
|
||||
mlflow.log_artifact(pickle_fpath, "sk_model1")
|
||||
mlflow.log_artifact(pickle_fpath, "sk_model2")
|
||||
mlflow.log_artifact(pickle_fpath, "sk_model")
|
||||
mlflow.set_tag("synapseml.ui.visible", "false") # not shown inline in fabric
|
||||
mlflow.delete_run(run.info.run_id)
|
||||
if delete_run:
|
||||
mlflow.delete_run(run.info.run_id)
|
||||
et = time.time()
|
||||
return et - st
|
||||
return 3 * (et - st)
|
||||
|
||||
|
||||
def infer_signature(X_train=None, y_train=None, dataframe=None, label=None):
|
||||
@@ -98,12 +186,76 @@ def infer_signature(X_train=None, y_train=None, dataframe=None, label=None):
|
||||
)
|
||||
|
||||
|
||||
def update_and_install_requirements(
|
||||
run_id=None,
|
||||
model_name=None,
|
||||
model_version=None,
|
||||
remove_list=None,
|
||||
artifact_path="model",
|
||||
dst_path=None,
|
||||
install_with_ipython=False,
|
||||
):
|
||||
if not (run_id or (model_name and model_version)):
|
||||
raise ValueError(
|
||||
"Please provide `run_id` or both `model_name` and `model_version`. If all three are provided, `run_id` will be used."
|
||||
)
|
||||
|
||||
if install_with_ipython:
|
||||
from IPython import get_ipython
|
||||
|
||||
if not remove_list:
|
||||
remove_list = [
|
||||
"synapseml-cognitive",
|
||||
"synapseml-core",
|
||||
"synapseml-deep-learning",
|
||||
"synapseml-internal",
|
||||
"synapseml-mlflow",
|
||||
"synapseml-opencv",
|
||||
"synapseml-vw",
|
||||
"synapseml-lightgbm",
|
||||
"synapseml-utils",
|
||||
"flaml", # flaml is needed for AutoML models, should be pre-installed in the runtime
|
||||
"pyspark", # fabric internal pyspark should be pre-installed in the runtime
|
||||
]
|
||||
|
||||
# Download model artifacts
|
||||
client = mlflow.MlflowClient()
|
||||
if not run_id:
|
||||
run_id = client.get_model_version(model_name, model_version).run_id
|
||||
if not dst_path:
|
||||
dst_path = os.path.join(tempfile.gettempdir(), "model_artifacts")
|
||||
os.makedirs(dst_path, exist_ok=True)
|
||||
client.download_artifacts(run_id, artifact_path, dst_path)
|
||||
requirements_path = os.path.join(dst_path, artifact_path, "requirements.txt")
|
||||
with open(requirements_path) as f:
|
||||
reqs = f.read().splitlines()
|
||||
old_reqs = [Requirement(req) for req in reqs if req]
|
||||
old_reqs_dict = {req.name: str(req) for req in old_reqs}
|
||||
for req in remove_list:
|
||||
req = Requirement(req)
|
||||
if req.name in old_reqs_dict:
|
||||
old_reqs_dict.pop(req.name, None)
|
||||
new_reqs_list = list(old_reqs_dict.values())
|
||||
|
||||
with open(requirements_path, "w") as f:
|
||||
f.write("\n".join(new_reqs_list))
|
||||
|
||||
if install_with_ipython:
|
||||
get_ipython().run_line_magic("pip", f"install -r {requirements_path} -q")
|
||||
else:
|
||||
logger.info(f"You can run `pip install -r {requirements_path}` to install dependencies.")
|
||||
return requirements_path
|
||||
|
||||
|
||||
def _mlflow_wrapper(evaluation_func, mlflow_exp_id, mlflow_config=None, extra_tags=None, autolog=False):
|
||||
def wrapped(*args, **kwargs):
|
||||
if mlflow_config is not None:
|
||||
from synapse.ml.mlflow import set_mlflow_env_config
|
||||
try:
|
||||
from synapse.ml.mlflow import set_mlflow_env_config
|
||||
|
||||
set_mlflow_env_config(mlflow_config)
|
||||
set_mlflow_env_config(mlflow_config)
|
||||
except Exception:
|
||||
pass
|
||||
import mlflow
|
||||
|
||||
if mlflow_exp_id is not None:
|
||||
@@ -124,7 +276,20 @@ def _mlflow_wrapper(evaluation_func, mlflow_exp_id, mlflow_config=None, extra_ta
|
||||
|
||||
|
||||
def _get_notebook_name():
|
||||
return None
|
||||
try:
|
||||
import re
|
||||
|
||||
from synapse.ml.mlflow import get_mlflow_env_config
|
||||
from synapse.ml.mlflow.shared_platform_utils import get_artifact
|
||||
|
||||
notebook_id = get_mlflow_env_config(False).artifact_id
|
||||
current_notebook = get_artifact(notebook_id)
|
||||
notebook_name = re.sub("\\W+", "-", current_notebook.displayName).strip()
|
||||
|
||||
return notebook_name
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to get notebook name: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def safe_json_dumps(obj):
|
||||
@@ -163,6 +328,8 @@ class MLflowIntegration:
|
||||
self.has_model = False
|
||||
self.only_history = False
|
||||
self._do_log_model = True
|
||||
self.futures = {}
|
||||
self.futures_log_model = {}
|
||||
|
||||
self.extra_tag = (
|
||||
extra_tag
|
||||
@@ -170,6 +337,9 @@ class MLflowIntegration:
|
||||
else {"extra_tag.sid": f"flaml_{__version__}_{int(time.time())}_{random.randint(1001, 9999)}"}
|
||||
)
|
||||
self.start_time = time.time()
|
||||
self.experiment_type = experiment_type
|
||||
self.update_autolog_state()
|
||||
|
||||
self.mlflow_client = mlflow.tracking.MlflowClient()
|
||||
parent_run_info = mlflow.active_run().info if mlflow.active_run() is not None else None
|
||||
if parent_run_info:
|
||||
@@ -188,8 +358,6 @@ class MLflowIntegration:
|
||||
mlflow.set_experiment(experiment_name=mlflow_exp_name)
|
||||
self.experiment_id = mlflow.tracking.fluent._active_experiment_id
|
||||
self.experiment_name = mlflow.get_experiment(self.experiment_id).name
|
||||
self.experiment_type = experiment_type
|
||||
self.update_autolog_state()
|
||||
|
||||
if self.autolog:
|
||||
# only end user created parent run in autolog scenario
|
||||
@@ -197,9 +365,12 @@ class MLflowIntegration:
|
||||
|
||||
def set_mlflow_config(self):
|
||||
if self.driver_mlflow_env_config is not None:
|
||||
from synapse.ml.mlflow import set_mlflow_env_config
|
||||
try:
|
||||
from synapse.ml.mlflow import set_mlflow_env_config
|
||||
|
||||
set_mlflow_env_config(self.driver_mlflow_env_config)
|
||||
set_mlflow_env_config(self.driver_mlflow_env_config)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def wrap_evaluation_function(self, evaluation_function):
|
||||
wrapped_evaluation_function = _mlflow_wrapper(
|
||||
@@ -267,6 +438,7 @@ class MLflowIntegration:
|
||||
else:
|
||||
_tags = []
|
||||
self.mlflow_client.log_batch(run_id=target_id, metrics=_metrics, params=[], tags=_tags)
|
||||
return f"Successfully copy_mlflow_run run_id {src_id} to run_id {target_id}"
|
||||
|
||||
def record_trial(self, result, trial, metric):
|
||||
if isinstance(result, dict):
|
||||
@@ -334,12 +506,31 @@ class MLflowIntegration:
|
||||
self.copy_mlflow_run(best_mlflow_run_id, self.parent_run_id)
|
||||
self.has_summary = True
|
||||
|
||||
def log_model(self, model, estimator, signature=None):
|
||||
def log_model(self, model, estimator, signature=None, run_id=None):
|
||||
if not self._do_log_model:
|
||||
return
|
||||
logger.debug(f"logging model {estimator}")
|
||||
ret_message = f"Successfully log_model {estimator} to run_id {run_id}"
|
||||
optional_remove_list = (
|
||||
[] if estimator in ["transformer", "transformer_ms", "tcn", "tft"] else OPTIONAL_REMOVE_REQUIREMENT_LIST
|
||||
)
|
||||
run = mlflow.active_run()
|
||||
if run and run.info.run_id == self.parent_run_id:
|
||||
logger.debug(
|
||||
f"Current active run_id {run.info.run_id} == parent_run_id {self.parent_run_id}, Starting run_id {run_id}"
|
||||
)
|
||||
mlflow.start_run(run_id=run_id, nested=True)
|
||||
elif run and run.info.run_id != run_id:
|
||||
ret_message = (
|
||||
f"Error: Should log_model {estimator} to run_id {run_id}, but logged to run_id {run.info.run_id}"
|
||||
)
|
||||
logger.error(ret_message)
|
||||
else:
|
||||
logger.debug(f"No active run, start run_id {run_id}")
|
||||
mlflow.start_run(run_id=run_id)
|
||||
logger.debug(f"logged model {estimator} to run_id {mlflow.active_run().info.run_id}")
|
||||
if estimator.endswith("_spark"):
|
||||
mlflow.spark.log_model(model, estimator, signature=signature)
|
||||
# mlflow.spark.log_model(model, estimator, signature=signature)
|
||||
mlflow.spark.log_model(model, "model", signature=signature)
|
||||
elif estimator in ["lgbm"]:
|
||||
mlflow.lightgbm.log_model(model, estimator, signature=signature)
|
||||
@@ -352,42 +543,93 @@ class MLflowIntegration:
|
||||
elif estimator in ["prophet"]:
|
||||
mlflow.prophet.log_model(model, estimator, signature=signature)
|
||||
elif estimator in ["orbit"]:
|
||||
pass
|
||||
logger.warning(f"Unsupported model: {estimator}. No model logged.")
|
||||
else:
|
||||
mlflow.sklearn.log_model(model, estimator, signature=signature)
|
||||
future = executor.submit(
|
||||
lambda: mlflow.models.model.update_model_requirements(
|
||||
model_uri=f"runs:/{run_id}/{'model' if estimator.endswith('_spark') else estimator}",
|
||||
operation="remove",
|
||||
requirement_list=convert_requirement(REMOVE_REQUIREMENT_LIST + optional_remove_list),
|
||||
)
|
||||
)
|
||||
self.futures[future] = f"run_{run_id}_requirements_updated"
|
||||
if not run or run.info.run_id == self.parent_run_id:
|
||||
logger.debug(f"Ending current run_id {mlflow.active_run().info.run_id}")
|
||||
mlflow.end_run()
|
||||
return ret_message
|
||||
|
||||
def _pickle_and_log_artifact(self, obj, artifact_name, pickle_fname="temp_.pkl"):
|
||||
def _pickle_and_log_artifact(self, obj, artifact_name, pickle_fname="temp_.pkl", run_id=None):
|
||||
if not self._do_log_model:
|
||||
return
|
||||
return True
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
pickle_fpath = os.path.join(tmpdir, pickle_fname)
|
||||
try:
|
||||
with open(pickle_fpath, "wb") as f:
|
||||
pickle.dump(obj, f)
|
||||
mlflow.log_artifact(pickle_fpath, artifact_name)
|
||||
mlflow.log_artifact(pickle_fpath, artifact_name, run_id)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to pickle and log artifact {artifact_name}, error: {e}")
|
||||
logger.debug(f"Failed to pickle and log {artifact_name}, error: {e}")
|
||||
return False
|
||||
|
||||
def pickle_and_log_automl_artifacts(self, automl, model, estimator, signature=None):
|
||||
def _log_pipeline(self, pipeline, flavor_name, pipeline_name, signature, run_id, estimator=None):
|
||||
logger.debug(f"logging pipeline {flavor_name}:{pipeline_name}:{estimator}")
|
||||
ret_message = f"Successfully _log_pipeline {flavor_name}:{pipeline_name}:{estimator} to run_id {run_id}"
|
||||
optional_remove_list = (
|
||||
[] if estimator in ["transformer", "transformer_ms", "tcn", "tft"] else OPTIONAL_REMOVE_REQUIREMENT_LIST
|
||||
)
|
||||
run = mlflow.active_run()
|
||||
if run and run.info.run_id == self.parent_run_id:
|
||||
logger.debug(
|
||||
f"Current active run_id {run.info.run_id} == parent_run_id {self.parent_run_id}, Starting run_id {run_id}"
|
||||
)
|
||||
mlflow.start_run(run_id=run_id, nested=True)
|
||||
elif run and run.info.run_id != run_id:
|
||||
ret_message = f"Error: Should _log_pipeline {flavor_name}:{pipeline_name}:{estimator} model to run_id {run_id}, but logged to run_id {run.info.run_id}"
|
||||
logger.error(ret_message)
|
||||
else:
|
||||
logger.debug(f"No active run, start run_id {run_id}")
|
||||
mlflow.start_run(run_id=run_id)
|
||||
logger.debug(
|
||||
f"logging pipeline {flavor_name}:{pipeline_name}:{estimator} to run_id {mlflow.active_run().info.run_id}"
|
||||
)
|
||||
if flavor_name == "sklearn":
|
||||
mlflow.sklearn.log_model(pipeline, pipeline_name, signature=signature)
|
||||
elif flavor_name == "spark":
|
||||
mlflow.spark.log_model(pipeline, pipeline_name, signature=signature)
|
||||
else:
|
||||
logger.warning(f"Unsupported pipeline flavor: {flavor_name}. No model logged.")
|
||||
future = executor.submit(
|
||||
lambda: mlflow.models.model.update_model_requirements(
|
||||
model_uri=f"runs:/{run_id}/{pipeline_name}",
|
||||
operation="remove",
|
||||
requirement_list=convert_requirement(REMOVE_REQUIREMENT_LIST + optional_remove_list),
|
||||
)
|
||||
)
|
||||
self.futures[future] = f"run_{run_id}_requirements_updated"
|
||||
if not run or run.info.run_id == self.parent_run_id:
|
||||
logger.debug(f"Ending current run_id {mlflow.active_run().info.run_id}")
|
||||
mlflow.end_run()
|
||||
return ret_message
|
||||
|
||||
def pickle_and_log_automl_artifacts(self, automl, model, estimator, signature=None, run_id=None):
|
||||
"""log automl artifacts to mlflow
|
||||
load back with `automl = mlflow.pyfunc.load_model(model_run_id_or_uri)`, then do prediction with `automl.predict(X)`
|
||||
"""
|
||||
logger.debug(f"logging automl artifacts {estimator}")
|
||||
self._pickle_and_log_artifact(automl.feature_transformer, "feature_transformer", "feature_transformer.pkl")
|
||||
self._pickle_and_log_artifact(automl.label_transformer, "label_transformer", "label_transformer.pkl")
|
||||
# Test test_mlflow 1 and 4 will get error: TypeError: cannot pickle '_io.TextIOWrapper' object
|
||||
# try:
|
||||
# self._pickle_and_log_artifact(automl, "automl", "automl.pkl")
|
||||
# except TypeError:
|
||||
# pass
|
||||
logger.debug(f"logging automl estimator {estimator}")
|
||||
# self._pickle_and_log_artifact(
|
||||
# automl.feature_transformer, "feature_transformer", "feature_transformer.pkl", run_id
|
||||
# )
|
||||
# self._pickle_and_log_artifact(automl.label_transformer, "label_transformer", "label_transformer.pkl", run_id)
|
||||
if estimator.endswith("_spark"):
|
||||
# spark pipeline is not supported yet
|
||||
return
|
||||
feature_transformer = automl.feature_transformer
|
||||
if isinstance(feature_transformer, Pipeline):
|
||||
if isinstance(feature_transformer, Pipeline) and not estimator.endswith("_spark"):
|
||||
pipeline = feature_transformer
|
||||
pipeline.steps.append(("estimator", model))
|
||||
elif isinstance(feature_transformer, SparkPipeline):
|
||||
elif isinstance(feature_transformer, SparkPipelineModel) and estimator.endswith("_spark"):
|
||||
pipeline = feature_transformer
|
||||
pipeline.stages.append(model)
|
||||
elif not estimator.endswith("_spark"):
|
||||
@@ -395,24 +637,26 @@ class MLflowIntegration:
|
||||
steps.append(("estimator", model))
|
||||
pipeline = Pipeline(steps)
|
||||
else:
|
||||
stages = [feature_transformer]
|
||||
stages = []
|
||||
if feature_transformer is not None:
|
||||
stages.append(feature_transformer)
|
||||
stages.append(model)
|
||||
pipeline = SparkPipeline(stages=stages)
|
||||
if isinstance(pipeline, SparkPipeline):
|
||||
pipeline = SparkPipelineModel(stages=stages)
|
||||
if isinstance(pipeline, SparkPipelineModel):
|
||||
logger.debug(f"logging spark pipeline {estimator}")
|
||||
mlflow.spark.log_model(pipeline, "automl_pipeline", signature=signature)
|
||||
self._log_pipeline(pipeline, "spark", "model", signature, run_id, estimator)
|
||||
else:
|
||||
# Add a log named "model" to fit default settings
|
||||
logger.debug(f"logging sklearn pipeline {estimator}")
|
||||
mlflow.sklearn.log_model(pipeline, "automl_pipeline", signature=signature)
|
||||
mlflow.sklearn.log_model(pipeline, "model", signature=signature)
|
||||
self._log_pipeline(pipeline, "sklearn", "model", signature, run_id, estimator)
|
||||
return f"Successfully pickle_and_log_automl_artifacts {estimator} to run_id {run_id}"
|
||||
|
||||
@time_it
|
||||
def record_state(self, automl, search_state, estimator):
|
||||
_st = time.time()
|
||||
automl_metric_name = (
|
||||
automl._state.metric if isinstance(automl._state.metric, str) else automl._state.error_metric
|
||||
)
|
||||
|
||||
if automl._state.error_metric.startswith("1-"):
|
||||
automl_metric_value = 1 - search_state.val_loss
|
||||
elif automl._state.error_metric.startswith("-"):
|
||||
@@ -425,6 +669,8 @@ class MLflowIntegration:
|
||||
else:
|
||||
config = search_state.config
|
||||
|
||||
self.automl_user_configurations = safe_json_dumps(automl._automl_user_configurations)
|
||||
|
||||
info = {
|
||||
"metrics": {
|
||||
"iter_counter": automl._track_iter,
|
||||
@@ -445,7 +691,7 @@ class MLflowIntegration:
|
||||
"flaml.meric": automl_metric_name,
|
||||
"flaml.run_source": "flaml-automl",
|
||||
"flaml.log_type": self.log_type,
|
||||
"flaml.automl_user_configurations": safe_json_dumps(automl._automl_user_configurations),
|
||||
"flaml.automl_user_configurations": self.automl_user_configurations,
|
||||
},
|
||||
"params": {
|
||||
"sample_size": search_state.sample_size,
|
||||
@@ -472,33 +718,70 @@ class MLflowIntegration:
|
||||
run_name = f"{self.parent_run_name}_child_{self.child_counter}"
|
||||
else:
|
||||
run_name = None
|
||||
_t1 = time.time()
|
||||
wait(self.futures_log_model)
|
||||
_t2 = time.time() - _t1
|
||||
logger.debug(f"wait futures_log_model in record_state took {_t2} seconds")
|
||||
with mlflow.start_run(nested=True, run_name=run_name) as child_run:
|
||||
self._log_info_to_run(info, child_run.info.run_id, log_params=True)
|
||||
future = executor.submit(lambda: self._log_info_to_run(info, child_run.info.run_id, log_params=True))
|
||||
self.futures[future] = f"iter_{automl._track_iter}_log_info_to_run"
|
||||
future = executor.submit(lambda: self._log_automl_configurations(child_run.info.run_id))
|
||||
self.futures[future] = f"iter_{automl._track_iter}_log_automl_configurations"
|
||||
if automl._state.model_history:
|
||||
self.log_model(
|
||||
search_state.trained_estimator._model, estimator, signature=automl.estimator_signature
|
||||
)
|
||||
self.pickle_and_log_automl_artifacts(
|
||||
automl, search_state.trained_estimator, estimator, signature=automl.pipeline_signature
|
||||
)
|
||||
if estimator.endswith("_spark"):
|
||||
future = executor.submit(
|
||||
lambda: self.log_model(
|
||||
search_state.trained_estimator._model,
|
||||
estimator,
|
||||
automl.estimator_signature,
|
||||
child_run.info.run_id,
|
||||
)
|
||||
)
|
||||
self.futures_log_model[future] = f"record_state-log_model_{estimator}"
|
||||
else:
|
||||
future = executor.submit(
|
||||
lambda: self.pickle_and_log_automl_artifacts(
|
||||
automl,
|
||||
search_state.trained_estimator,
|
||||
estimator,
|
||||
automl.pipeline_signature,
|
||||
child_run.info.run_id,
|
||||
)
|
||||
)
|
||||
self.futures_log_model[future] = f"record_state-pickle_and_log_automl_artifacts_{estimator}"
|
||||
self.manual_run_ids.append(child_run.info.run_id)
|
||||
self.child_counter += 1
|
||||
return f"Successfully record_state iteration {automl._track_iter}"
|
||||
|
||||
@time_it
|
||||
def log_automl(self, automl):
|
||||
self.set_best_iter(automl)
|
||||
if self.autolog:
|
||||
if self.parent_run_id is not None:
|
||||
mlflow.start_run(run_id=self.parent_run_id, experiment_id=self.experiment_id)
|
||||
mlflow.log_metric("best_validation_loss", automl._state.best_loss)
|
||||
mlflow.log_metric("best_iteration", automl._best_iteration)
|
||||
mlflow.log_metric("num_child_runs", len(self.infos))
|
||||
if automl._trained_estimator is not None and not self.has_model:
|
||||
self.log_model(
|
||||
automl._trained_estimator._model, automl.best_estimator, signature=automl.estimator_signature
|
||||
)
|
||||
self.pickle_and_log_automl_artifacts(
|
||||
automl, automl.model, automl.best_estimator, signature=automl.pipeline_signature
|
||||
)
|
||||
mlflow.log_metrics(
|
||||
{
|
||||
"best_validation_loss": automl._state.best_loss,
|
||||
"best_iteration": automl._best_iteration,
|
||||
"num_child_runs": len(self.infos),
|
||||
}
|
||||
)
|
||||
if (
|
||||
automl._trained_estimator is not None
|
||||
and not self.has_model
|
||||
and automl._trained_estimator._model is not None
|
||||
):
|
||||
if automl.best_estimator.endswith("_spark"):
|
||||
self.log_model(
|
||||
automl._trained_estimator._model,
|
||||
automl.best_estimator,
|
||||
automl.estimator_signature,
|
||||
self.parent_run_id,
|
||||
)
|
||||
else:
|
||||
self.pickle_and_log_automl_artifacts(
|
||||
automl, automl.model, automl.best_estimator, automl.pipeline_signature, self.parent_run_id
|
||||
)
|
||||
self.has_model = True
|
||||
|
||||
self.adopt_children(automl)
|
||||
@@ -515,30 +798,65 @@ class MLflowIntegration:
|
||||
if "ml" in conf.keys():
|
||||
conf = conf["ml"]
|
||||
|
||||
mlflow.log_params(conf)
|
||||
mlflow.log_param("best_learner", automl._best_estimator)
|
||||
mlflow.log_params({**conf, "best_learner": automl._best_estimator}, run_id=self.parent_run_id)
|
||||
if not self.has_summary:
|
||||
logger.info(f"logging best model {automl.best_estimator}")
|
||||
self.copy_mlflow_run(best_mlflow_run_id, self.parent_run_id)
|
||||
future = executor.submit(lambda: self.copy_mlflow_run(best_mlflow_run_id, self.parent_run_id))
|
||||
self.futures[future] = "log_automl_copy_mlflow_run"
|
||||
future = executor.submit(lambda: self._log_automl_configurations(self.parent_run_id))
|
||||
self.futures[future] = "log_automl_log_automl_configurations"
|
||||
self.has_summary = True
|
||||
if automl._trained_estimator is not None and not self.has_model:
|
||||
self.log_model(
|
||||
automl._trained_estimator._model,
|
||||
automl.best_estimator,
|
||||
signature=automl.estimator_signature,
|
||||
)
|
||||
self.pickle_and_log_automl_artifacts(
|
||||
automl, automl.model, automl.best_estimator, signature=automl.pipeline_signature
|
||||
)
|
||||
_t1 = time.time()
|
||||
wait(self.futures_log_model)
|
||||
_t2 = time.time() - _t1
|
||||
logger.debug(f"wait futures_log_model in log_automl took {_t2} seconds")
|
||||
if (
|
||||
automl._trained_estimator is not None
|
||||
and not self.has_model
|
||||
and automl._trained_estimator._model is not None
|
||||
):
|
||||
if automl.best_estimator.endswith("_spark"):
|
||||
future = executor.submit(
|
||||
lambda: self.log_model(
|
||||
automl._trained_estimator._model,
|
||||
automl.best_estimator,
|
||||
signature=automl.estimator_signature,
|
||||
run_id=self.parent_run_id,
|
||||
)
|
||||
)
|
||||
self.futures_log_model[future] = f"log_automl-log_model_{automl.best_estimator}"
|
||||
else:
|
||||
future = executor.submit(
|
||||
lambda: self.pickle_and_log_automl_artifacts(
|
||||
automl,
|
||||
automl.model,
|
||||
automl.best_estimator,
|
||||
signature=automl.pipeline_signature,
|
||||
run_id=self.parent_run_id,
|
||||
)
|
||||
)
|
||||
self.futures_log_model[
|
||||
future
|
||||
] = f"log_automl-pickle_and_log_automl_artifacts_{automl.best_estimator}"
|
||||
self.has_model = True
|
||||
|
||||
def resume_mlflow(self):
|
||||
if len(self.resume_params) > 0:
|
||||
mlflow.autolog(**self.resume_params)
|
||||
|
||||
def _log_automl_configurations(self, run_id):
|
||||
self.mlflow_client.log_text(
|
||||
run_id=run_id,
|
||||
text=self.automl_user_configurations,
|
||||
artifact_file="automl_configurations/automl_user_configurations.json",
|
||||
)
|
||||
return f"Successfully _log_automl_configurations to run_id {run_id}"
|
||||
|
||||
def _log_info_to_run(self, info, run_id, log_params=False):
|
||||
_metrics = [Metric(key, value, int(time.time() * 1000), 0) for key, value in info["metrics"].items()]
|
||||
_tags = [RunTag(key, str(value)) for key, value in info["tags"].items()]
|
||||
_tags = [
|
||||
RunTag(key, str(value)[:5000]) for key, value in info["tags"].items()
|
||||
] # AML will raise error if value length > 5000
|
||||
_params = [
|
||||
Param(key, str(value))
|
||||
for key, value in info["params"].items()
|
||||
@@ -554,6 +872,7 @@ class MLflowIntegration:
|
||||
_tags = [RunTag("mlflow.parentRunId", run_id)]
|
||||
self.mlflow_client.log_batch(run_id=run.info.run_id, metrics=_metrics, params=[], tags=_tags)
|
||||
del info["submetrics"]["values"]
|
||||
return f"Successfully _log_info_to_run to run_id {run_id}"
|
||||
|
||||
def adopt_children(self, result=None):
|
||||
"""
|
||||
|
||||
37
flaml/tune/logger.py
Normal file
37
flaml/tune/logger.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
|
||||
class ColoredFormatter(logging.Formatter):
|
||||
# ANSI escape codes for colors
|
||||
COLORS = {
|
||||
# logging.DEBUG: "\033[36m", # Cyan
|
||||
# logging.INFO: "\033[32m", # Green
|
||||
logging.WARNING: "\033[33m", # Yellow
|
||||
logging.ERROR: "\033[31m", # Red
|
||||
logging.CRITICAL: "\033[1;31m", # Bright Red
|
||||
}
|
||||
RESET = "\033[0m" # Reset to default
|
||||
|
||||
def __init__(self, fmt, datefmt, use_color=True):
|
||||
super().__init__(fmt, datefmt)
|
||||
self.use_color = use_color
|
||||
|
||||
def format(self, record):
|
||||
formatted = super().format(record)
|
||||
if self.use_color:
|
||||
color = self.COLORS.get(record.levelno, "")
|
||||
if color:
|
||||
return f"{color}{formatted}{self.RESET}"
|
||||
return formatted
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
use_color = True
|
||||
if os.getenv("FLAML_LOG_NO_COLOR"):
|
||||
use_color = False
|
||||
|
||||
logger_formatter = ColoredFormatter(
|
||||
"[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s", "%m-%d %H:%M:%S", use_color
|
||||
)
|
||||
logger.propagate = False
|
||||
@@ -244,13 +244,32 @@ class BlendSearch(Searcher):
|
||||
evaluated_rewards=evaluated_rewards,
|
||||
)
|
||||
except (AssertionError, ValueError):
|
||||
self._gs = GlobalSearch(
|
||||
space=gs_space,
|
||||
metric=metric,
|
||||
mode=mode,
|
||||
seed=gs_seed,
|
||||
sampler=sampler,
|
||||
)
|
||||
try:
|
||||
self._gs = GlobalSearch(
|
||||
space=gs_space,
|
||||
metric=metric,
|
||||
mode=mode,
|
||||
seed=gs_seed,
|
||||
sampler=sampler,
|
||||
)
|
||||
except ValueError:
|
||||
# Ray Tune's OptunaSearch converts Tune domains into Optuna
|
||||
# distributions. Optuna disallows integer log distributions
|
||||
# with step != 1 (e.g., qlograndint with q>1), which can
|
||||
# raise here. Fall back to FLAML's OptunaSearch wrapper,
|
||||
# which handles these spaces more permissively.
|
||||
if getattr(GlobalSearch, "__module__", "").startswith("ray.tune"):
|
||||
from .suggestion import OptunaSearch as _FallbackOptunaSearch
|
||||
|
||||
self._gs = _FallbackOptunaSearch(
|
||||
space=gs_space,
|
||||
metric=metric,
|
||||
mode=mode,
|
||||
seed=gs_seed,
|
||||
sampler=sampler,
|
||||
)
|
||||
else:
|
||||
raise
|
||||
self._gs.space = space
|
||||
else:
|
||||
self._gs = None
|
||||
|
||||
@@ -35,6 +35,73 @@ from ..sample import (
|
||||
Quantized,
|
||||
Uniform,
|
||||
)
|
||||
|
||||
# If Ray is installed, flaml.tune may re-export Ray Tune sampling functions.
|
||||
# In that case, the search space contains Ray Tune Domain/Sampler objects,
|
||||
# which should be accepted by our Optuna search-space conversion.
|
||||
try:
|
||||
from ray import __version__ as _ray_version # type: ignore
|
||||
|
||||
if str(_ray_version).startswith("1."):
|
||||
from ray.tune.sample import ( # type: ignore
|
||||
Categorical as _RayCategorical,
|
||||
)
|
||||
from ray.tune.sample import (
|
||||
Domain as _RayDomain,
|
||||
)
|
||||
from ray.tune.sample import (
|
||||
Float as _RayFloat,
|
||||
)
|
||||
from ray.tune.sample import (
|
||||
Integer as _RayInteger,
|
||||
)
|
||||
from ray.tune.sample import (
|
||||
LogUniform as _RayLogUniform,
|
||||
)
|
||||
from ray.tune.sample import (
|
||||
Quantized as _RayQuantized,
|
||||
)
|
||||
from ray.tune.sample import (
|
||||
Uniform as _RayUniform,
|
||||
)
|
||||
else:
|
||||
from ray.tune.search.sample import ( # type: ignore
|
||||
Categorical as _RayCategorical,
|
||||
)
|
||||
from ray.tune.search.sample import (
|
||||
Domain as _RayDomain,
|
||||
)
|
||||
from ray.tune.search.sample import (
|
||||
Float as _RayFloat,
|
||||
)
|
||||
from ray.tune.search.sample import (
|
||||
Integer as _RayInteger,
|
||||
)
|
||||
from ray.tune.search.sample import (
|
||||
LogUniform as _RayLogUniform,
|
||||
)
|
||||
from ray.tune.search.sample import (
|
||||
Quantized as _RayQuantized,
|
||||
)
|
||||
from ray.tune.search.sample import (
|
||||
Uniform as _RayUniform,
|
||||
)
|
||||
|
||||
_FLOAT_TYPES = (Float, _RayFloat)
|
||||
_INTEGER_TYPES = (Integer, _RayInteger)
|
||||
_CATEGORICAL_TYPES = (Categorical, _RayCategorical)
|
||||
_DOMAIN_TYPES = (Domain, _RayDomain)
|
||||
_QUANTIZED_TYPES = (Quantized, _RayQuantized)
|
||||
_UNIFORM_TYPES = (Uniform, _RayUniform)
|
||||
_LOGUNIFORM_TYPES = (LogUniform, _RayLogUniform)
|
||||
except Exception: # pragma: no cover
|
||||
_FLOAT_TYPES = (Float,)
|
||||
_INTEGER_TYPES = (Integer,)
|
||||
_CATEGORICAL_TYPES = (Categorical,)
|
||||
_DOMAIN_TYPES = (Domain,)
|
||||
_QUANTIZED_TYPES = (Quantized,)
|
||||
_UNIFORM_TYPES = (Uniform,)
|
||||
_LOGUNIFORM_TYPES = (LogUniform,)
|
||||
from ..trial import flatten_dict, unflatten_dict
|
||||
from .variant_generator import parse_spec_vars
|
||||
|
||||
@@ -850,19 +917,22 @@ class OptunaSearch(Searcher):
|
||||
def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
|
||||
quantize = None
|
||||
|
||||
sampler = domain.get_sampler()
|
||||
if isinstance(sampler, Quantized):
|
||||
# Ray Tune Domains and FLAML Domains both provide get_sampler(), but
|
||||
# fall back to the .sampler attribute for robustness.
|
||||
sampler = domain.get_sampler() if hasattr(domain, "get_sampler") else getattr(domain, "sampler", None)
|
||||
|
||||
if isinstance(sampler, _QUANTIZED_TYPES) or type(sampler).__name__ == "Quantized":
|
||||
quantize = sampler.q
|
||||
sampler = sampler.sampler
|
||||
if isinstance(sampler, LogUniform):
|
||||
sampler = getattr(sampler, "sampler", None) or sampler.get_sampler()
|
||||
if isinstance(sampler, _LOGUNIFORM_TYPES) or type(sampler).__name__ == "LogUniform":
|
||||
logger.warning(
|
||||
"Optuna does not handle quantization in loguniform "
|
||||
"sampling. The parameter will be passed but it will "
|
||||
"probably be ignored."
|
||||
)
|
||||
|
||||
if isinstance(domain, Float):
|
||||
if isinstance(sampler, LogUniform):
|
||||
if isinstance(domain, _FLOAT_TYPES) or type(domain).__name__ == "Float":
|
||||
if isinstance(sampler, _LOGUNIFORM_TYPES) or type(sampler).__name__ == "LogUniform":
|
||||
if quantize:
|
||||
logger.warning(
|
||||
"Optuna does not support both quantization and "
|
||||
@@ -870,17 +940,17 @@ class OptunaSearch(Searcher):
|
||||
)
|
||||
return ot.distributions.LogUniformDistribution(domain.lower, domain.upper)
|
||||
|
||||
elif isinstance(sampler, Uniform):
|
||||
elif isinstance(sampler, _UNIFORM_TYPES) or type(sampler).__name__ == "Uniform":
|
||||
if quantize:
|
||||
return ot.distributions.DiscreteUniformDistribution(domain.lower, domain.upper, quantize)
|
||||
return ot.distributions.UniformDistribution(domain.lower, domain.upper)
|
||||
|
||||
elif isinstance(domain, Integer):
|
||||
if isinstance(sampler, LogUniform):
|
||||
elif isinstance(domain, _INTEGER_TYPES) or type(domain).__name__ == "Integer":
|
||||
if isinstance(sampler, _LOGUNIFORM_TYPES) or type(sampler).__name__ == "LogUniform":
|
||||
# ``step`` argument Deprecated in v2.0.0. ``step`` argument should be 1 in Log Distribution
|
||||
# The removal of this feature is currently scheduled for v4.0.0,
|
||||
return ot.distributions.IntLogUniformDistribution(domain.lower, domain.upper - 1, step=1)
|
||||
elif isinstance(sampler, Uniform):
|
||||
elif isinstance(sampler, _UNIFORM_TYPES) or type(sampler).__name__ == "Uniform":
|
||||
# Upper bound should be inclusive for quantization and
|
||||
# exclusive otherwise
|
||||
return ot.distributions.IntUniformDistribution(
|
||||
@@ -888,16 +958,16 @@ class OptunaSearch(Searcher):
|
||||
domain.upper - int(bool(not quantize)),
|
||||
step=quantize or 1,
|
||||
)
|
||||
elif isinstance(domain, Categorical):
|
||||
if isinstance(sampler, Uniform):
|
||||
elif isinstance(domain, _CATEGORICAL_TYPES) or type(domain).__name__ == "Categorical":
|
||||
if isinstance(sampler, _UNIFORM_TYPES) or type(sampler).__name__ == "Uniform":
|
||||
return ot.distributions.CategoricalDistribution(domain.categories)
|
||||
|
||||
raise ValueError(
|
||||
"Optuna search does not support parameters of type "
|
||||
"`{}` with samplers of type `{}`".format(type(domain).__name__, type(domain.sampler).__name__)
|
||||
"`{}` with samplers of type `{}`".format(type(domain).__name__, type(sampler).__name__)
|
||||
)
|
||||
|
||||
# Parameter name is e.g. "a/b/c" for nested dicts
|
||||
values = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
|
||||
|
||||
return values
|
||||
return values
|
||||
|
||||
@@ -162,6 +162,10 @@ def broadcast_code(custom_code="", file_name="mylearner"):
|
||||
assert isinstance(MyLargeLGBM(), LGBMEstimator)
|
||||
```
|
||||
"""
|
||||
# Check if Spark is available
|
||||
spark_available, _ = check_spark()
|
||||
|
||||
# Write to local driver file system
|
||||
flaml_path = os.path.dirname(os.path.abspath(__file__))
|
||||
custom_code = textwrap.dedent(custom_code)
|
||||
custom_path = os.path.join(flaml_path, file_name + ".py")
|
||||
@@ -169,6 +173,24 @@ def broadcast_code(custom_code="", file_name="mylearner"):
|
||||
with open(custom_path, "w") as f:
|
||||
f.write(custom_code)
|
||||
|
||||
# If using Spark, broadcast the code content to executors
|
||||
if spark_available:
|
||||
spark = SparkSession.builder.getOrCreate()
|
||||
bc_code = spark.sparkContext.broadcast(custom_code)
|
||||
|
||||
# Execute a job to ensure the code is distributed to all executors
|
||||
def _write_code(bc):
|
||||
code = bc.value
|
||||
import os
|
||||
|
||||
module_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name + ".py")
|
||||
os.makedirs(os.path.dirname(module_path), exist_ok=True)
|
||||
with open(module_path, "w") as f:
|
||||
f.write(code)
|
||||
return True
|
||||
|
||||
spark.sparkContext.parallelize(range(1)).map(lambda _: _write_code(bc_code)).collect()
|
||||
|
||||
return custom_path
|
||||
|
||||
|
||||
|
||||
@@ -21,11 +21,11 @@ except (ImportError, AssertionError):
|
||||
from .analysis import ExperimentAnalysis as EA
|
||||
else:
|
||||
ray_available = True
|
||||
|
||||
import logging
|
||||
|
||||
from flaml.tune.spark.utils import PySparkOvertimeMonitor, check_spark
|
||||
|
||||
from .logger import logger, logger_formatter
|
||||
from .result import DEFAULT_METRIC
|
||||
from .trial import Trial
|
||||
|
||||
@@ -41,8 +41,6 @@ except ImportError:
|
||||
internal_mlflow = False
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.propagate = False
|
||||
_use_ray = True
|
||||
_runner = None
|
||||
_verbose = 0
|
||||
@@ -197,9 +195,16 @@ def report(_metric=None, **kwargs):
|
||||
global _training_iteration
|
||||
if _use_ray:
|
||||
try:
|
||||
from ray import tune
|
||||
from ray import __version__ as ray_version
|
||||
|
||||
return tune.report(_metric, **kwargs)
|
||||
if ray_version.startswith("1."):
|
||||
from ray import tune
|
||||
|
||||
return tune.report(_metric, **kwargs)
|
||||
else: # ray>=2
|
||||
from ray.air import session
|
||||
|
||||
return session.report(metrics={"metric": _metric, **kwargs})
|
||||
except ImportError:
|
||||
# calling tune.report() outside tune.run()
|
||||
return
|
||||
@@ -260,6 +265,8 @@ def run(
|
||||
mlflow_exp_name: Optional[str] = None,
|
||||
automl_info: Optional[Tuple[float]] = None,
|
||||
extra_tag: Optional[dict] = None,
|
||||
cost_attr: Optional[str] = "auto",
|
||||
cost_budget: Optional[float] = None,
|
||||
**ray_args,
|
||||
):
|
||||
"""The function-based way of performing HPO.
|
||||
@@ -462,6 +469,12 @@ def run(
|
||||
overwritten by the value of `n_concurrent_trials` in AutoML. When <= 0, the concurrent trials
|
||||
will be set to the number of executors.
|
||||
extra_tag: dict, default=None | Extra tags to be added to the mlflow runs created by autologging.
|
||||
cost_attr: None or str to specify the attribute to evaluate the cost of different trials.
|
||||
Default is "auto", which means that we will automatically choose the cost attribute to use (depending
|
||||
on the nature of the resource budget). When cost_attr is set to None, cost differences between different trials will be omitted
|
||||
in our search algorithm. When cost_attr is set to a str different from "auto" and "time_total_s",
|
||||
this cost_attr must be available in the result dict of the trial.
|
||||
cost_budget: A float of the cost budget. Only valid when cost_attr is a str different from "auto" and "time_total_s".
|
||||
**ray_args: keyword arguments to pass to ray.tune.run().
|
||||
Only valid when use_ray=True.
|
||||
"""
|
||||
@@ -506,10 +519,6 @@ def run(
|
||||
elif not logger.hasHandlers():
|
||||
# Add the console handler.
|
||||
_ch = logging.StreamHandler(stream=sys.stdout)
|
||||
logger_formatter = logging.Formatter(
|
||||
"[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s",
|
||||
"%m-%d %H:%M:%S",
|
||||
)
|
||||
_ch.setFormatter(logger_formatter)
|
||||
logger.addHandler(_ch)
|
||||
if verbose <= 2:
|
||||
@@ -600,6 +609,8 @@ def run(
|
||||
metric_constraints=metric_constraints,
|
||||
use_incumbent_result_in_evaluation=use_incumbent_result_in_evaluation,
|
||||
lexico_objectives=lexico_objectives,
|
||||
cost_attr=cost_attr,
|
||||
cost_budget=cost_budget,
|
||||
)
|
||||
else:
|
||||
if metric is None or mode is None:
|
||||
@@ -735,10 +746,16 @@ def run(
|
||||
max_concurrent = max(1, search_alg.max_concurrent)
|
||||
else:
|
||||
max_concurrent = max(1, max_spark_parallelism)
|
||||
passed_in_n_concurrent_trials = max(n_concurrent_trials, max_concurrent)
|
||||
n_concurrent_trials = min(
|
||||
n_concurrent_trials if n_concurrent_trials > 0 else num_executors,
|
||||
max_concurrent,
|
||||
)
|
||||
if n_concurrent_trials < passed_in_n_concurrent_trials:
|
||||
logger.warning(
|
||||
f"The actual concurrent trials is {n_concurrent_trials}. You can set the environment "
|
||||
f"variable `FLAML_MAX_CONCURRENT` to '{passed_in_n_concurrent_trials}' to override the detected num of executors."
|
||||
)
|
||||
with parallel_backend("spark"):
|
||||
with Parallel(n_jobs=n_concurrent_trials, verbose=max(0, (verbose - 1) * 50)) as parallel:
|
||||
try:
|
||||
@@ -760,7 +777,7 @@ def run(
|
||||
and num_failures < upperbound_num_failures
|
||||
):
|
||||
if automl_info and automl_info[0] > 0 and time_budget_s < np.inf:
|
||||
time_budget_s -= automl_info[0]
|
||||
time_budget_s -= automl_info[0] * n_concurrent_trials
|
||||
logger.debug(f"Remaining time budget with mlflow log latency: {time_budget_s} seconds.")
|
||||
while len(_runner.running_trials) < n_concurrent_trials:
|
||||
# suggest trials for spark
|
||||
|
||||
@@ -1 +1 @@
|
||||
__version__ = "2.3.2"
|
||||
__version__ = "2.4.0"
|
||||
|
||||
259
installed_all_dependencies_3.10_ubuntu-latest.txt
Normal file
259
installed_all_dependencies_3.10_ubuntu-latest.txt
Normal file
@@ -0,0 +1,259 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
async-timeout==5.0.1
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
backports.strenum==1.3.1
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
cmdstanpy==1.3.0
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.2
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cuda-bindings==12.9.4
|
||||
cuda-pathfinder==1.3.4
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
exceptiongroup==1.3.1
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@cc903f78074f93f2fcf644c046c9e3036ce7fb38#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
holidays==0.90
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
importlib_resources==6.5.2
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==8.38.0
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.4.2
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
nvidia-cublas-cu12==12.8.4.1
|
||||
nvidia-cuda-cupti-cu12==12.8.90
|
||||
nvidia-cuda-nvrtc-cu12==12.8.93
|
||||
nvidia-cuda-runtime-cu12==12.8.90
|
||||
nvidia-cudnn-cu12==9.10.2.21
|
||||
nvidia-cufft-cu12==11.3.3.83
|
||||
nvidia-cufile-cu12==1.13.1.3
|
||||
nvidia-curand-cu12==10.3.9.90
|
||||
nvidia-cusolver-cu12==11.7.3.90
|
||||
nvidia-cusparse-cu12==12.5.8.93
|
||||
nvidia-cusparselt-cu12==0.7.1
|
||||
nvidia-nccl-cu12==2.27.5
|
||||
nvidia-nvjitlink-cu12==12.8.93
|
||||
nvidia-nvshmem-cu12==3.4.5
|
||||
nvidia-nvtx-cu12==12.8.90
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
overrides==7.7.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pexpect==4.9.0
|
||||
pillow==12.1.1
|
||||
platformdirs==4.9.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
prophet==1.3.0
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
ptyprocess==0.7.0
|
||||
pure_eval==0.2.3
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.7.2
|
||||
scipy==1.15.3
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
stanio==0.5.1
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
tomli==2.4.0
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
triton==3.6.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==1.7.6
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
237
installed_all_dependencies_3.10_windows-latest.txt
Normal file
237
installed_all_dependencies_3.10_windows-latest.txt
Normal file
@@ -0,0 +1,237 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
async-timeout==5.0.1
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
backports.strenum==1.3.1
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
colorama==0.4.6
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.2
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
exceptiongroup==1.3.1
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@6f573938bf46a906dfba1101034fd05813268c29#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==8.38.0
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.4.2
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
overrides==7.7.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pillow==12.1.1
|
||||
platformdirs==4.9.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
pure_eval==0.2.3
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyreadline3==3.5.4
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
pywinpty==3.0.3
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.7.2
|
||||
scipy==1.15.3
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
tomli==2.4.0
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==1.7.6
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
217
installed_all_dependencies_3.11_macos-latest.txt
Normal file
217
installed_all_dependencies_3.11_macos-latest.txt
Normal file
@@ -0,0 +1,217 @@
|
||||
absl-py==2.3.1
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.0
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
appnope==0.1.4
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.0.5
|
||||
attrs==25.4.0
|
||||
babel==2.17.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cloudpickle==3.1.2
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.0
|
||||
coverage==7.13.1
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.77.0
|
||||
dataclasses==0.6
|
||||
datasets==4.4.2
|
||||
debugpy==1.8.19
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.128.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.20.3
|
||||
-e git+https://github.com/microsoft/FLAML@3ab9ce3cda330a54210c591e89b7f8674948d607#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.47.0
|
||||
graphviz==0.21
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface-hub==0.36.0
|
||||
identify==2.6.15
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.1.0
|
||||
ipython==9.9.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.3
|
||||
jupyterlab==4.5.1
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.0
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.0
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.15.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.16.6
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.1
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==3.6.1
|
||||
overrides==7.7.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.5
|
||||
patsy==1.0.2
|
||||
pexpect==4.9.0
|
||||
pillow==12.1.0
|
||||
platformdirs==4.5.1
|
||||
plotly==6.5.1
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prometheus_client==0.23.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
protobuf==6.33.3
|
||||
psutil==7.2.1
|
||||
ptyprocess==0.7.0
|
||||
pure_eval==0.2.3
|
||||
pyarrow==22.0.0
|
||||
pyasn1==0.6.1
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==2.23
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.1
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.5.0
|
||||
pytorch-lightning==2.6.0
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2025.11.3
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf_python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.16.3
|
||||
Send2Trash==2.0.0
|
||||
seqeval==1.2.2
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.1
|
||||
SQLAlchemy==2.0.45
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
starlette==0.50.0
|
||||
statsmodels==0.14.6
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.9.1
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.24.1
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.1
|
||||
traitlets==5.14.3
|
||||
transformers==4.57.3
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.2.14
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.1.3
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
258
installed_all_dependencies_3.11_ubuntu-latest.txt
Normal file
258
installed_all_dependencies_3.11_ubuntu-latest.txt
Normal file
@@ -0,0 +1,258 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
cmdstanpy==1.3.0
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cuda-bindings==12.9.4
|
||||
cuda-pathfinder==1.3.4
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@4ac0347e5a032ff01b3ad05ac51863cfbd1a1b62#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
holidays==0.90
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
importlib_resources==6.5.2
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==9.10.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
nvidia-cublas-cu12==12.8.4.1
|
||||
nvidia-cuda-cupti-cu12==12.8.90
|
||||
nvidia-cuda-nvrtc-cu12==12.8.93
|
||||
nvidia-cuda-runtime-cu12==12.8.90
|
||||
nvidia-cudnn-cu12==9.10.2.21
|
||||
nvidia-cufft-cu12==11.3.3.83
|
||||
nvidia-cufile-cu12==1.13.1.3
|
||||
nvidia-curand-cu12==10.3.9.90
|
||||
nvidia-cusolver-cu12==11.7.3.90
|
||||
nvidia-cusparse-cu12==12.5.8.93
|
||||
nvidia-cusparselt-cu12==0.7.1
|
||||
nvidia-nccl-cu12==2.27.5
|
||||
nvidia-nvjitlink-cu12==12.8.93
|
||||
nvidia-nvshmem-cu12==3.4.5
|
||||
nvidia-nvtx-cu12==12.8.90
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
overrides==7.7.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pexpect==4.9.0
|
||||
pillow==12.1.1
|
||||
platformdirs==4.9.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
prophet==1.3.0
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
ptyprocess==0.7.0
|
||||
pure_eval==0.2.3
|
||||
py4j==0.10.9.7
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyspark==3.5.1
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
stanio==0.5.1
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
triton==3.6.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.2.0
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
234
installed_all_dependencies_3.11_windows-latest.txt
Normal file
234
installed_all_dependencies_3.11_windows-latest.txt
Normal file
@@ -0,0 +1,234 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
colorama==0.4.6
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.21.2
|
||||
-e git+https://github.com/microsoft/FLAML@06111054a40bde57cca96dccd7476a3211b19893#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==9.10.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
overrides==7.7.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pillow==12.1.1
|
||||
platformdirs==4.7.0
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
pure_eval==0.2.3
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyreadline3==3.5.4
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
pywinpty==3.0.3
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
typer==0.23.0
|
||||
typer-slim==0.23.0
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.2.0
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
259
installed_all_dependencies_3.12_ubuntu-latest.txt
Normal file
259
installed_all_dependencies_3.12_ubuntu-latest.txt
Normal file
@@ -0,0 +1,259 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
cmdstanpy==1.3.0
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cuda-bindings==12.9.4
|
||||
cuda-pathfinder==1.3.4
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@7083042210ce43ef6dd6cd2d2b71896f23e645d5#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
holidays==0.90
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
importlib_resources==6.5.2
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==9.10.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
nvidia-cublas-cu12==12.8.4.1
|
||||
nvidia-cuda-cupti-cu12==12.8.90
|
||||
nvidia-cuda-nvrtc-cu12==12.8.93
|
||||
nvidia-cuda-runtime-cu12==12.8.90
|
||||
nvidia-cudnn-cu12==9.10.2.21
|
||||
nvidia-cufft-cu12==11.3.3.83
|
||||
nvidia-cufile-cu12==1.13.1.3
|
||||
nvidia-curand-cu12==10.3.9.90
|
||||
nvidia-cusolver-cu12==11.7.3.90
|
||||
nvidia-cusparse-cu12==12.5.8.93
|
||||
nvidia-cusparselt-cu12==0.7.1
|
||||
nvidia-nccl-cu12==2.27.5
|
||||
nvidia-nvjitlink-cu12==12.8.93
|
||||
nvidia-nvshmem-cu12==3.4.5
|
||||
nvidia-nvtx-cu12==12.8.90
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pexpect==4.9.0
|
||||
pillow==12.1.1
|
||||
platformdirs==4.9.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
prophet==1.3.0
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
ptyprocess==0.7.0
|
||||
pure_eval==0.2.3
|
||||
py4j==0.10.9.9
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyspark==4.0.1
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
setuptools==81.0.0
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
stanio==0.5.1
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
triton==3.6.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
wheel==0.46.3
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.2.0
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
238
installed_all_dependencies_3.12_windows-latest.txt
Normal file
238
installed_all_dependencies_3.12_windows-latest.txt
Normal file
@@ -0,0 +1,238 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argcomplete==3.6.3
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
colorama==0.4.6
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@579f795f333e6eb7e7e9c769a1f24721bc86700b#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==9.10.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pillow==12.1.1
|
||||
pipx==1.8.0
|
||||
platformdirs==4.5.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
pure_eval==0.2.3
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyreadline3==3.5.4
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
pywinpty==3.0.3
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
setuptools==81.0.0
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
userpath==1.9.2
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
wheel==0.46.3
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.2.0
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
259
installed_all_dependencies_3.13_ubuntu-latest.txt
Normal file
259
installed_all_dependencies_3.13_ubuntu-latest.txt
Normal file
@@ -0,0 +1,259 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
cmdstanpy==1.3.0
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cuda-bindings==12.9.4
|
||||
cuda-pathfinder==1.3.4
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@1d301b413c65128d61006a4c19a95a911ca308ad#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
holidays==0.90
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
importlib_resources==6.5.2
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==9.10.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==2.4.2
|
||||
nvidia-cublas-cu12==12.8.4.1
|
||||
nvidia-cuda-cupti-cu12==12.8.90
|
||||
nvidia-cuda-nvrtc-cu12==12.8.93
|
||||
nvidia-cuda-runtime-cu12==12.8.90
|
||||
nvidia-cudnn-cu12==9.10.2.21
|
||||
nvidia-cufft-cu12==11.3.3.83
|
||||
nvidia-cufile-cu12==1.13.1.3
|
||||
nvidia-curand-cu12==10.3.9.90
|
||||
nvidia-cusolver-cu12==11.7.3.90
|
||||
nvidia-cusparse-cu12==12.5.8.93
|
||||
nvidia-cusparselt-cu12==0.7.1
|
||||
nvidia-nccl-cu12==2.27.5
|
||||
nvidia-nvjitlink-cu12==12.8.93
|
||||
nvidia-nvshmem-cu12==3.4.5
|
||||
nvidia-nvtx-cu12==12.8.90
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pexpect==4.9.0
|
||||
pillow==12.1.1
|
||||
platformdirs==4.9.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
prophet==1.3.0
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
ptyprocess==0.7.0
|
||||
pure_eval==0.2.3
|
||||
py4j==0.10.9.9
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyspark==4.1.0
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
setuptools==81.0.0
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
stanio==0.5.1
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
triton==3.6.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
wheel==0.46.3
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.2.0
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
235
installed_all_dependencies_3.13_windows-latest.txt
Normal file
235
installed_all_dependencies_3.13_windows-latest.txt
Normal file
@@ -0,0 +1,235 @@
|
||||
absl-py==2.4.0
|
||||
accelerate==1.12.0
|
||||
aiohappyeyeballs==2.6.1
|
||||
aiohttp==3.13.3
|
||||
aiosignal==1.4.0
|
||||
alembic==1.18.4
|
||||
annotated-doc==0.0.4
|
||||
annotated-types==0.7.0
|
||||
anyio==4.12.1
|
||||
argon2-cffi==25.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
arrow==1.4.0
|
||||
asttokens==3.0.1
|
||||
async-lru==2.1.0
|
||||
attrs==25.4.0
|
||||
autopage==0.6.0
|
||||
babel==2.18.0
|
||||
beautifulsoup4==4.14.3
|
||||
bleach==6.3.0
|
||||
cachetools==5.5.2
|
||||
catboost==1.2.8
|
||||
certifi==2026.1.4
|
||||
cffi==2.0.0
|
||||
cfgv==3.5.0
|
||||
charset-normalizer==3.4.4
|
||||
click==8.3.1
|
||||
cliff==4.13.1
|
||||
cloudpickle==3.1.2
|
||||
cmaes==0.12.0
|
||||
cmd2==3.2.0
|
||||
colorama==0.4.6
|
||||
colorlog==6.10.1
|
||||
comm==0.2.3
|
||||
contourpy==1.3.3
|
||||
convertdate==2.4.1
|
||||
coverage==7.13.4
|
||||
cryptography==46.0.5
|
||||
cycler==0.12.1
|
||||
databricks-sdk==0.88.0
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
debugpy==1.8.20
|
||||
decorator==5.2.1
|
||||
defusedxml==0.7.1
|
||||
dill==0.4.0
|
||||
distlib==0.4.0
|
||||
evaluate==0.4.6
|
||||
executing==2.2.1
|
||||
fastapi==0.129.0
|
||||
fastjsonschema==2.21.2
|
||||
filelock==3.24.0
|
||||
-e git+https://github.com/microsoft/FLAML@897eae6616b1c24037ce8d3dae6c0aa3263e85a9#egg=FLAML
|
||||
fonttools==4.61.1
|
||||
fqdn==1.5.1
|
||||
frozenlist==1.8.0
|
||||
fsspec==2025.10.0
|
||||
gitdb==4.0.12
|
||||
GitPython==3.1.46
|
||||
google-auth==2.48.0
|
||||
graphviz==0.21
|
||||
greenlet==3.3.1
|
||||
h11==0.16.0
|
||||
hcrystalball==0.1.12
|
||||
hf-xet==1.2.0
|
||||
httpcore==1.0.9
|
||||
httpx==0.28.1
|
||||
huggingface_hub==1.4.1
|
||||
identify==2.6.16
|
||||
idna==3.11
|
||||
importlib_metadata==8.7.1
|
||||
iniconfig==2.3.0
|
||||
ipykernel==7.2.0
|
||||
ipython==9.10.0
|
||||
ipython_pygments_lexers==1.1.1
|
||||
ipywidgets==8.1.8
|
||||
isoduration==20.11.0
|
||||
jedi==0.19.2
|
||||
Jinja2==3.1.6
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
json5==0.13.0
|
||||
jsonpointer==3.0.0
|
||||
jsonschema==4.26.0
|
||||
jsonschema-specifications==2025.9.1
|
||||
jupyter==1.1.1
|
||||
jupyter-console==6.6.3
|
||||
jupyter-events==0.12.0
|
||||
jupyter-lsp==2.3.0
|
||||
jupyter_client==8.8.0
|
||||
jupyter_core==5.9.1
|
||||
jupyter_server==2.17.0
|
||||
jupyter_server_terminals==0.5.4
|
||||
jupyterlab==4.5.4
|
||||
jupyterlab_pygments==0.3.0
|
||||
jupyterlab_server==2.28.0
|
||||
jupyterlab_widgets==3.0.16
|
||||
kiwisolver==1.4.9
|
||||
lark==1.3.1
|
||||
liac-arff==2.5.0
|
||||
lightgbm==4.6.0
|
||||
lightning==2.6.1
|
||||
lightning-utilities==0.15.2
|
||||
lunardate==0.2.2
|
||||
Mako==1.3.10
|
||||
markdown-it-py==4.0.0
|
||||
MarkupSafe==3.0.3
|
||||
matplotlib==3.10.8
|
||||
matplotlib-inline==0.2.1
|
||||
mdurl==0.1.2
|
||||
minio==7.2.20
|
||||
mistune==3.2.0
|
||||
mlflow-skinny==2.22.1
|
||||
mpmath==1.3.0
|
||||
multidict==6.7.1
|
||||
multiprocess==0.70.18
|
||||
narwhals==2.16.0
|
||||
nbclient==0.10.4
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nest-asyncio==1.6.0
|
||||
networkx==3.6.1
|
||||
nltk==3.9.2
|
||||
nodeenv==1.10.0
|
||||
notebook==7.5.3
|
||||
notebook_shim==0.2.4
|
||||
numpy==2.4.2
|
||||
openml==0.15.1
|
||||
opentelemetry-api==1.39.1
|
||||
opentelemetry-sdk==1.39.1
|
||||
opentelemetry-semantic-conventions==0.60b1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
pandocfilters==1.5.1
|
||||
parso==0.8.6
|
||||
patsy==1.0.2
|
||||
pillow==12.1.1
|
||||
platformdirs==4.9.1
|
||||
plotly==6.5.2
|
||||
pluggy==1.6.0
|
||||
pre_commit==4.5.1
|
||||
prettytable==3.17.0
|
||||
prometheus_client==0.24.1
|
||||
prompt_toolkit==3.0.52
|
||||
propcache==0.4.1
|
||||
protobuf==6.33.5
|
||||
psutil==7.2.2
|
||||
pure_eval==0.2.3
|
||||
pyarrow==23.0.0
|
||||
pyasn1==0.6.2
|
||||
pyasn1_modules==0.4.2
|
||||
pycparser==3.0
|
||||
pycryptodome==3.23.0
|
||||
pydantic==2.12.5
|
||||
pydantic_core==2.41.5
|
||||
Pygments==2.19.2
|
||||
pyluach==2.3.0
|
||||
PyMeeus==0.5.12
|
||||
pyparsing==3.3.2
|
||||
pyperclip==1.11.0
|
||||
pyreadline3==3.5.4
|
||||
pytest==9.0.2
|
||||
pytest-rerunfailures==16.1
|
||||
python-dateutil==2.9.0.post0
|
||||
python-json-logger==4.0.0
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
pytz==2025.2
|
||||
pywinpty==3.0.3
|
||||
PyYAML==6.0.3
|
||||
pyzmq==27.1.0
|
||||
referencing==0.37.0
|
||||
regex==2026.1.15
|
||||
requests==2.32.5
|
||||
rfc3339-validator==0.1.4
|
||||
rfc3986-validator==0.1.1
|
||||
rfc3987-syntax==1.1.0
|
||||
rgf-python==3.12.0
|
||||
rich==14.3.2
|
||||
rich-argparse==1.7.2
|
||||
rouge_score==0.1.2
|
||||
rpds-py==0.30.0
|
||||
rsa==4.9.1
|
||||
safetensors==0.7.0
|
||||
scikit-base==0.13.1
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
Send2Trash==2.1.0
|
||||
seqeval==1.2.2
|
||||
setuptools==81.0.0
|
||||
shellingham==1.5.4
|
||||
six==1.17.0
|
||||
smmap==5.0.2
|
||||
soupsieve==2.8.3
|
||||
SQLAlchemy==2.0.46
|
||||
sqlparse==0.5.5
|
||||
stack-data==0.6.3
|
||||
starlette==0.52.1
|
||||
statsmodels==0.14.6
|
||||
stevedore==5.6.0
|
||||
sympy==1.14.0
|
||||
tensorboardX==2.6.4
|
||||
terminado==0.18.1
|
||||
thop==0.1.1.post2209072238
|
||||
threadpoolctl==3.6.0
|
||||
tinycss2==1.4.0
|
||||
tokenizers==0.22.2
|
||||
torch==2.10.0
|
||||
torchmetrics==1.8.2
|
||||
torchvision==0.25.0
|
||||
tornado==6.5.4
|
||||
tqdm==4.67.3
|
||||
traitlets==5.14.3
|
||||
transformers==5.1.0
|
||||
typer==0.23.1
|
||||
typer-slim==0.23.1
|
||||
typing-inspection==0.4.2
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.3
|
||||
uri-template==1.3.0
|
||||
urllib3==2.6.3
|
||||
uvicorn==0.40.0
|
||||
virtualenv==20.36.1
|
||||
wcwidth==0.6.0
|
||||
webcolors==25.10.0
|
||||
webencodings==0.5.1
|
||||
websocket-client==1.9.0
|
||||
wheel==0.46.3
|
||||
widgetsnbextension==4.0.15
|
||||
workalendar==17.0.0
|
||||
xgboost==3.2.0
|
||||
xmltodict==1.0.2
|
||||
xxhash==3.6.0
|
||||
yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
42
installed_first_tier_dependencies_3.10_ubuntu-latest.txt
Normal file
42
installed_first_tier_dependencies_3.10_ubuntu-latest.txt
Normal file
@@ -0,0 +1,42 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
prophet==1.3.0
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.7.2
|
||||
scipy==1.15.3
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==1.7.6
|
||||
xgboost==1.7.6
|
||||
Current commit hash: cc903f78074f93f2fcf644c046c9e3036ce7fb38
|
||||
41
installed_first_tier_dependencies_3.10_windows-latest.txt
Normal file
41
installed_first_tier_dependencies_3.10_windows-latest.txt
Normal file
@@ -0,0 +1,41 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.7.2
|
||||
scipy==1.15.3
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==1.7.6
|
||||
xgboost==1.7.6
|
||||
Current commit hash: 6f573938bf46a906dfba1101034fd05813268c29
|
||||
39
installed_first_tier_dependencies_3.11_macos-latest.txt
Normal file
39
installed_first_tier_dependencies_3.11_macos-latest.txt
Normal file
@@ -0,0 +1,39 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.1
|
||||
dataclasses==0.6
|
||||
datasets==4.4.2
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.1.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.16.6
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==3.6.1
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
psutil==7.2.1
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.5.0
|
||||
pytorch-lightning==2.6.0
|
||||
requests==2.32.5
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.16.3
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.9.1
|
||||
torchvision==0.24.1
|
||||
transformers==4.57.3
|
||||
xgboost==3.1.3
|
||||
Current commit hash: 3ab9ce3cda330a54210c591e89b7f8674948d607
|
||||
42
installed_first_tier_dependencies_3.11_ubuntu-latest.txt
Normal file
42
installed_first_tier_dependencies_3.11_ubuntu-latest.txt
Normal file
@@ -0,0 +1,42 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
prophet==1.3.0
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==3.2.0
|
||||
xgboost==3.2.0
|
||||
Current commit hash: 4ac0347e5a032ff01b3ad05ac51863cfbd1a1b62
|
||||
41
installed_first_tier_dependencies_3.11_windows-latest.txt
Normal file
41
installed_first_tier_dependencies_3.11_windows-latest.txt
Normal file
@@ -0,0 +1,41 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==3.2.0
|
||||
xgboost==3.2.0
|
||||
Current commit hash: 06111054a40bde57cca96dccd7476a3211b19893
|
||||
42
installed_first_tier_dependencies_3.12_ubuntu-latest.txt
Normal file
42
installed_first_tier_dependencies_3.12_ubuntu-latest.txt
Normal file
@@ -0,0 +1,42 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
prophet==1.3.0
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==3.2.0
|
||||
xgboost==3.2.0
|
||||
Current commit hash: 7083042210ce43ef6dd6cd2d2b71896f23e645d5
|
||||
41
installed_first_tier_dependencies_3.12_windows-latest.txt
Normal file
41
installed_first_tier_dependencies_3.12_windows-latest.txt
Normal file
@@ -0,0 +1,41 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==1.26.4
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==3.2.0
|
||||
xgboost==3.2.0
|
||||
Current commit hash: 579f795f333e6eb7e7e9c769a1f24721bc86700b
|
||||
42
installed_first_tier_dependencies_3.13_ubuntu-latest.txt
Normal file
42
installed_first_tier_dependencies_3.13_ubuntu-latest.txt
Normal file
@@ -0,0 +1,42 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==2.4.2
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
prophet==1.3.0
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==3.2.0
|
||||
xgboost==3.2.0
|
||||
Current commit hash: 1d301b413c65128d61006a4c19a95a911ca308ad
|
||||
41
installed_first_tier_dependencies_3.13_windows-latest.txt
Normal file
41
installed_first_tier_dependencies_3.13_windows-latest.txt
Normal file
@@ -0,0 +1,41 @@
|
||||
catboost==1.2.8
|
||||
coverage==7.13.4
|
||||
dataclasses==0.6
|
||||
datasets==4.5.0
|
||||
dill==0.4.0
|
||||
evaluate==0.4.6
|
||||
hcrystalball==0.1.12
|
||||
ipykernel==7.2.0
|
||||
joblib==1.3.2
|
||||
joblibspark==0.6.0
|
||||
jupyter==1.1.1
|
||||
lightgbm==4.6.0
|
||||
mlflow-skinny==2.22.1
|
||||
nbconvert==7.17.0
|
||||
nbformat==5.10.4
|
||||
nltk==3.9.2
|
||||
numpy==2.4.2
|
||||
openml==0.15.1
|
||||
optuna==2.8.0
|
||||
packaging==24.2
|
||||
pandas==2.3.3
|
||||
psutil==7.2.2
|
||||
pytest-rerunfailures==16.1
|
||||
pytest==9.0.2
|
||||
pytorch-forecasting==1.6.1
|
||||
pytorch-lightning==2.6.1
|
||||
requests==2.32.5
|
||||
rgf-python==3.12.0
|
||||
rouge_score==0.1.2
|
||||
scikit-learn==1.8.0
|
||||
scipy==1.17.0
|
||||
seqeval==1.2.2
|
||||
statsmodels==0.14.6
|
||||
tensorboardX==2.6.4
|
||||
thop==0.1.1-2209072238
|
||||
torch==2.10.0
|
||||
torchvision==0.25.0
|
||||
transformers==5.1.0
|
||||
xgboost==3.2.0
|
||||
xgboost==3.2.0
|
||||
Current commit hash: 897eae6616b1c24037ce8d3dae6c0aa3263e85a9
|
||||
3
pytest.ini
Normal file
3
pytest.ini
Normal file
@@ -0,0 +1,3 @@
|
||||
[pytest]
|
||||
markers =
|
||||
spark: mark a test as requiring Spark
|
||||
78
setup.py
78
setup.py
@@ -51,60 +51,59 @@ setuptools.setup(
|
||||
"joblib<=1.3.2",
|
||||
],
|
||||
"test": [
|
||||
"jupyter",
|
||||
"numpy>=1.17,<2.0.0; python_version<'3.13'",
|
||||
"numpy>2.0.0; python_version>='3.13'",
|
||||
"jupyter; python_version<'3.13'",
|
||||
"lightgbm>=2.3.1",
|
||||
"xgboost>=0.90,<2.0.0",
|
||||
"xgboost>=0.90,<2.0.0; python_version<'3.11'",
|
||||
"xgboost>=2.0.0; python_version>='3.11'",
|
||||
"scipy>=1.4.1",
|
||||
"pandas>=1.1.4,<2.0.0; python_version<'3.10'",
|
||||
"pandas>=1.1.4; python_version>='3.10'",
|
||||
"scikit-learn>=1.0.0",
|
||||
"scikit-learn>=1.2.0",
|
||||
"thop",
|
||||
"pytest>=6.1.1",
|
||||
"pytest-rerunfailures>=13.0",
|
||||
"coverage>=5.3",
|
||||
"pre-commit",
|
||||
"torch",
|
||||
"torchvision",
|
||||
"catboost>=0.26,<1.2; python_version<'3.11'",
|
||||
"catboost>=0.26; python_version>='3.11'",
|
||||
"catboost>=0.26; python_version<'3.13'",
|
||||
"rgf-python",
|
||||
"optuna>=2.8.0,<=3.6.1",
|
||||
"openml",
|
||||
"openml; python_version<'3.13'",
|
||||
"statsmodels>=0.12.2",
|
||||
"psutil==5.8.0",
|
||||
"psutil",
|
||||
"dataclasses",
|
||||
"transformers[torch]==4.26",
|
||||
"transformers[torch]",
|
||||
"datasets",
|
||||
"nltk<=3.8.1", # 3.8.2 doesn't work with mlflow
|
||||
"evaluate",
|
||||
"nltk!=3.8.2", # 3.8.2 doesn't work with mlflow
|
||||
"rouge_score",
|
||||
"hcrystalball==0.1.10",
|
||||
"hcrystalball",
|
||||
"seqeval",
|
||||
"pytorch-forecasting>=0.9.0,<=0.10.1; python_version<'3.11'",
|
||||
# "pytorch-forecasting==0.10.1; python_version=='3.11'",
|
||||
"mlflow==2.15.1",
|
||||
"pytorch-forecasting; python_version<'3.13'",
|
||||
"mlflow-skinny<=2.22.1", # Refer to https://mvnrepository.com/artifact/org.mlflow/mlflow-spark
|
||||
"joblibspark>=0.5.0",
|
||||
"joblib<=1.3.2",
|
||||
"nbconvert",
|
||||
"nbformat",
|
||||
"ipykernel",
|
||||
"pytorch-lightning<1.9.1", # test_forecast_panel
|
||||
"tensorboardX==2.6", # test_forecast_panel
|
||||
"requests<2.29.0", # https://github.com/docker/docker-py/issues/3113
|
||||
"pytorch-lightning", # test_forecast_panel
|
||||
"tensorboardX", # test_forecast_panel
|
||||
"requests", # https://github.com/docker/docker-py/issues/3113
|
||||
"packaging",
|
||||
"pydantic==1.10.9",
|
||||
"sympy",
|
||||
"wolframalpha",
|
||||
"dill", # a drop in replacement of pickle
|
||||
],
|
||||
"catboost": [
|
||||
"catboost>=0.26,<1.2; python_version<'3.11'",
|
||||
"catboost>=0.26,<=1.2.5; python_version>='3.11'",
|
||||
"catboost>=0.26",
|
||||
],
|
||||
"blendsearch": [
|
||||
"optuna>=2.8.0,<=3.6.1",
|
||||
"packaging",
|
||||
],
|
||||
"ray": [
|
||||
"ray[tune]~=1.13",
|
||||
"ray[tune]>=1.13,<2.5.0",
|
||||
],
|
||||
"azureml": [
|
||||
"azureml-mlflow",
|
||||
@@ -131,33 +130,21 @@ setuptools.setup(
|
||||
"seqeval",
|
||||
],
|
||||
"ts_forecast": [
|
||||
"holidays<0.14", # to prevent installation error for prophet
|
||||
"prophet>=1.0.1",
|
||||
"holidays",
|
||||
"prophet>=1.1.5",
|
||||
"statsmodels>=0.12.2",
|
||||
"hcrystalball==0.1.10",
|
||||
"hcrystalball>=0.1.10",
|
||||
],
|
||||
"forecast": [
|
||||
"holidays<0.14", # to prevent installation error for prophet
|
||||
"prophet>=1.0.1",
|
||||
"holidays",
|
||||
"prophet>=1.1.5",
|
||||
"statsmodels>=0.12.2",
|
||||
"hcrystalball==0.1.10",
|
||||
"pytorch-forecasting>=0.9.0; python_version<'3.11'",
|
||||
# "pytorch-forecasting==0.10.1; python_version=='3.11'",
|
||||
"pytorch-lightning==1.9.0",
|
||||
"tensorboardX==2.6",
|
||||
"hcrystalball>=0.1.10",
|
||||
"pytorch-forecasting>=0.10.4; python_version<'3.13'",
|
||||
"pytorch-lightning>=1.9.0",
|
||||
"tensorboardX>=2.6",
|
||||
],
|
||||
"benchmark": ["catboost>=0.26", "psutil==5.8.0", "xgboost==1.3.3", "pandas==1.1.4"],
|
||||
"openai": ["openai==0.27.8", "diskcache"],
|
||||
"autogen": ["openai==0.27.8", "diskcache", "termcolor"],
|
||||
"mathchat": ["openai==0.27.8", "diskcache", "termcolor", "sympy", "pydantic==1.10.9", "wolframalpha"],
|
||||
"retrievechat": [
|
||||
"openai==0.27.8",
|
||||
"diskcache",
|
||||
"termcolor",
|
||||
"chromadb",
|
||||
"tiktoken",
|
||||
"sentence_transformers",
|
||||
],
|
||||
"synapse": [
|
||||
"joblibspark>=0.5.0",
|
||||
"optuna>=2.8.0,<=3.6.1",
|
||||
@@ -170,10 +157,9 @@ setuptools.setup(
|
||||
"Operating System :: OS Independent",
|
||||
# Specify the Python versions you support here.
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
],
|
||||
python_requires=">=3.8",
|
||||
python_requires=">=3.10",
|
||||
)
|
||||
|
||||
@@ -17,6 +17,8 @@ from flaml import AutoML
|
||||
from flaml.automl.ml import sklearn_metric_loss_score
|
||||
from flaml.tune.spark.utils import check_spark
|
||||
|
||||
pytestmark = pytest.mark.spark
|
||||
|
||||
leaderboard = defaultdict(dict)
|
||||
|
||||
warnings.simplefilter(action="ignore")
|
||||
|
||||
@@ -477,7 +477,10 @@ def test_forecast_classification(budget=5):
|
||||
def get_stalliion_data():
|
||||
from pytorch_forecasting.data.examples import get_stallion_data
|
||||
|
||||
data = get_stallion_data()
|
||||
# data = get_stallion_data()
|
||||
data = pd.read_parquet(
|
||||
"https://raw.githubusercontent.com/sktime/pytorch-forecasting/refs/heads/main/examples/data/stallion.parquet"
|
||||
)
|
||||
# add time index - For datasets with no missing values, FLAML will automate this process
|
||||
data["time_idx"] = data["date"].dt.year * 12 + data["date"].dt.month
|
||||
data["time_idx"] -= data["time_idx"].min()
|
||||
|
||||
51
test/automl/test_max_iter_1.py
Normal file
51
test/automl/test_max_iter_1.py
Normal file
@@ -0,0 +1,51 @@
|
||||
import mlflow
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from flaml import AutoML
|
||||
|
||||
|
||||
def test_max_iter_1():
|
||||
date_rng = pd.date_range(start="2024-01-01", periods=100, freq="H")
|
||||
X = pd.DataFrame({"ds": date_rng})
|
||||
y_train_24h = np.random.rand(len(X)) * 100
|
||||
|
||||
# AutoML
|
||||
settings = {
|
||||
"max_iter": 1,
|
||||
"estimator_list": ["xgboost", "lgbm"],
|
||||
"starting_points": {"xgboost": {}, "lgbm": {}},
|
||||
"task": "ts_forecast",
|
||||
"log_file_name": "test_max_iter_1.log",
|
||||
"seed": 41,
|
||||
"mlflow_exp_name": "TestExp-max_iter-1",
|
||||
"use_spark": False,
|
||||
"n_concurrent_trials": 1,
|
||||
"verbose": 1,
|
||||
"featurization": "off",
|
||||
"metric": "rmse",
|
||||
"mlflow_logging": True,
|
||||
}
|
||||
|
||||
automl = AutoML(**settings)
|
||||
|
||||
with mlflow.start_run(run_name="AutoMLModel-XGBoost-and-LGBM-max_iter_1"):
|
||||
automl.fit(
|
||||
X_train=X,
|
||||
y_train=y_train_24h,
|
||||
period=24,
|
||||
X_val=X,
|
||||
y_val=y_train_24h,
|
||||
split_ratio=0,
|
||||
force_cancel=False,
|
||||
)
|
||||
|
||||
assert automl.model is not None, "AutoML failed to return a model"
|
||||
assert automl.best_run_id is not None, "Best run ID should not be None with mlflow logging"
|
||||
|
||||
print("Best model:", automl.model)
|
||||
print("Best run ID:", automl.best_run_id)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_max_iter_1()
|
||||
@@ -10,6 +10,18 @@ from flaml import AutoML
|
||||
|
||||
|
||||
class TestMLFlowLoggingParam:
|
||||
def test_update_and_install_requirements(self):
|
||||
import mlflow
|
||||
from sklearn import tree
|
||||
|
||||
from flaml.fabric.mlflow import update_and_install_requirements
|
||||
|
||||
with mlflow.start_run(run_name="test") as run:
|
||||
sk_model = tree.DecisionTreeClassifier()
|
||||
mlflow.sklearn.log_model(sk_model, "model", registered_model_name="test")
|
||||
|
||||
update_and_install_requirements(run_id=run.info.run_id)
|
||||
|
||||
def test_should_start_new_run_by_default(self, automl_settings):
|
||||
with mlflow.start_run() as parent_run:
|
||||
automl = AutoML()
|
||||
|
||||
@@ -143,4 +143,5 @@ def test_prep():
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_lrl2()
|
||||
test_prep()
|
||||
@@ -1,8 +1,23 @@
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
from minio.error import ServerError
|
||||
from openml.exceptions import OpenMLServerException
|
||||
|
||||
try:
|
||||
from minio.error import ServerError
|
||||
except ImportError:
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
try:
|
||||
from openml.exceptions import OpenMLServerException
|
||||
except ImportError:
|
||||
|
||||
class OpenMLServerException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
from requests.exceptions import ChunkedEncodingError, SSLError
|
||||
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ class TestLogging(unittest.TestCase):
|
||||
"keep_search_state": True,
|
||||
"learner_selector": "roundrobin",
|
||||
}
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
n = len(y_train) >> 1
|
||||
print(automl.model, automl.classes_, automl.predict(X_train))
|
||||
automl.fit(
|
||||
|
||||
@@ -47,7 +47,7 @@ class TestRegression(unittest.TestCase):
|
||||
"n_jobs": 1,
|
||||
"model_history": True,
|
||||
}
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
n = int(len(y_train) * 9 // 10)
|
||||
automl.fit(X_train=X_train[:n], y_train=y_train[:n], X_val=X_train[n:], y_val=y_train[n:], **automl_settings)
|
||||
assert automl._state.eval_method == "holdout"
|
||||
@@ -141,7 +141,7 @@ class TestRegression(unittest.TestCase):
|
||||
"n_concurrent_trials": 10,
|
||||
"hpo_method": hpo_method,
|
||||
}
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
try:
|
||||
automl_experiment.fit(X_train=X_train, y_train=y_train, **automl_settings)
|
||||
print(automl_experiment.predict(X_train))
|
||||
@@ -268,7 +268,7 @@ def test_reproducibility_of_regression_models(estimator: str):
|
||||
"skip_transform": True,
|
||||
"retrain_full": True,
|
||||
}
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
automl.fit(X_train=X, y_train=y, **automl_settings)
|
||||
best_model = automl.model
|
||||
assert best_model is not None
|
||||
@@ -314,7 +314,7 @@ def test_reproducibility_of_catboost_regression_model():
|
||||
"skip_transform": True,
|
||||
"retrain_full": True,
|
||||
}
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
automl.fit(X_train=X, y_train=y, **automl_settings)
|
||||
best_model = automl.model
|
||||
assert best_model is not None
|
||||
@@ -360,7 +360,7 @@ def test_reproducibility_of_lgbm_regression_model():
|
||||
"skip_transform": True,
|
||||
"retrain_full": True,
|
||||
}
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
automl.fit(X_train=X, y_train=y, **automl_settings)
|
||||
best_model = automl.model
|
||||
assert best_model is not None
|
||||
@@ -424,7 +424,7 @@ def test_reproducibility_of_underlying_regression_models(estimator: str):
|
||||
"skip_transform": True,
|
||||
"retrain_full": False,
|
||||
}
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X, y = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
automl.fit(X_train=X, y_train=y, **automl_settings)
|
||||
best_model = automl.model
|
||||
assert best_model is not None
|
||||
|
||||
@@ -142,7 +142,7 @@ class TestScore:
|
||||
def test_regression(self):
|
||||
automl_experiment = AutoML()
|
||||
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
n = int(len(y_train) * 9 // 10)
|
||||
|
||||
for each_estimator in [
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
from sklearn.datasets import fetch_openml
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from sklearn.datasets import fetch_openml, load_iris
|
||||
from sklearn.metrics import accuracy_score
|
||||
from sklearn.model_selection import GroupKFold, KFold, train_test_split
|
||||
|
||||
@@ -48,7 +50,7 @@ def test_time():
|
||||
_test(split_type="time")
|
||||
|
||||
|
||||
def test_groups():
|
||||
def test_groups_for_classification_task():
|
||||
from sklearn.externals._arff import ArffException
|
||||
|
||||
try:
|
||||
@@ -58,8 +60,6 @@ def test_groups():
|
||||
|
||||
X, y = load_wine(return_X_y=True)
|
||||
|
||||
import numpy as np
|
||||
|
||||
automl = AutoML()
|
||||
automl_settings = {
|
||||
"time_budget": 2,
|
||||
@@ -68,7 +68,7 @@ def test_groups():
|
||||
"model_history": True,
|
||||
"eval_method": "cv",
|
||||
"groups": np.random.randint(low=0, high=10, size=len(y)),
|
||||
"estimator_list": ["lgbm", "rf", "xgboost", "kneighbor"],
|
||||
"estimator_list": ["catboost", "lgbm", "rf", "xgboost", "kneighbor"],
|
||||
"learner_selector": "roundrobin",
|
||||
}
|
||||
automl.fit(X, y, **automl_settings)
|
||||
@@ -88,6 +88,72 @@ def test_groups():
|
||||
automl.fit(X, y, **automl_settings)
|
||||
|
||||
|
||||
def test_groups_for_regression_task():
|
||||
"""Append nonsensical groups to iris dataset and use it to test that GroupKFold works for regression tasks"""
|
||||
iris_dict_data = load_iris(as_frame=True) # numpy arrays
|
||||
iris_data = iris_dict_data["frame"] # pandas dataframe data + target
|
||||
|
||||
rng = np.random.default_rng(42)
|
||||
iris_data["cluster"] = rng.integers(
|
||||
low=0, high=5, size=iris_data.shape[0]
|
||||
) # np.random.randint(0, 5, iris_data.shape[0])
|
||||
|
||||
automl = AutoML()
|
||||
X = iris_data[["sepal length (cm)", "sepal width (cm)", "petal length (cm)"]].to_numpy()
|
||||
y = iris_data["petal width (cm)"]
|
||||
X_train, X_test, y_train, y_test, groups_train, groups_test = train_test_split(
|
||||
X, y, iris_data["cluster"], random_state=42
|
||||
)
|
||||
automl_settings = {
|
||||
"max_iter": 5,
|
||||
"time_budget": -1,
|
||||
"metric": "r2",
|
||||
"task": "regression",
|
||||
"estimator_list": ["lgbm", "rf", "xgboost", "kneighbor"],
|
||||
"eval_method": "cv",
|
||||
"split_type": "uniform",
|
||||
"groups": groups_train,
|
||||
}
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
|
||||
|
||||
def test_groups_with_sample_weights():
|
||||
"""Verifies that sample weights can be used with group splits i.e. that https://github.com/microsoft/FLAML/issues/1396 remains fixed"""
|
||||
iris_dict_data = load_iris(as_frame=True) # numpy arrays
|
||||
iris_data = iris_dict_data["frame"] # pandas dataframe data + target
|
||||
iris_data["cluster"] = np.random.randint(0, 5, iris_data.shape[0])
|
||||
automl = AutoML()
|
||||
|
||||
X = iris_data[["sepal length (cm)", "sepal width (cm)", "petal length (cm)"]].to_numpy()
|
||||
y = iris_data["petal width (cm)"]
|
||||
sample_weight = pd.Series(np.random.rand(X.shape[0]))
|
||||
(
|
||||
X_train,
|
||||
X_test,
|
||||
y_train,
|
||||
y_test,
|
||||
groups_train,
|
||||
groups_test,
|
||||
sample_weight_train,
|
||||
sample_weight_test,
|
||||
) = train_test_split(X, y, iris_data["cluster"], sample_weight, random_state=42)
|
||||
automl_settings = {
|
||||
"max_iter": 5,
|
||||
"time_budget": -1,
|
||||
"metric": "r2",
|
||||
"task": "regression",
|
||||
"log_file_name": "error.log",
|
||||
"log_type": "all",
|
||||
"estimator_list": ["lgbm"],
|
||||
"eval_method": "cv",
|
||||
"split_type": "group",
|
||||
"groups": groups_train,
|
||||
"sample_weight": sample_weight_train,
|
||||
}
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
assert automl.model is not None
|
||||
|
||||
|
||||
def test_stratified_groupkfold():
|
||||
from minio.error import ServerError
|
||||
from sklearn.model_selection import StratifiedGroupKFold
|
||||
@@ -108,6 +174,7 @@ def test_stratified_groupkfold():
|
||||
"split_type": splitter,
|
||||
"groups": X_train["Airline"],
|
||||
"estimator_list": [
|
||||
"catboost",
|
||||
"lgbm",
|
||||
"rf",
|
||||
"xgboost",
|
||||
@@ -203,4 +270,4 @@ def test_object():
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_groups()
|
||||
test_groups_for_classification_task()
|
||||
|
||||
@@ -30,7 +30,7 @@ class TestTrainingLog(unittest.TestCase):
|
||||
"keep_search_state": True,
|
||||
"estimator_list": estimator_list,
|
||||
}
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
|
||||
# Check if the training log file is populated.
|
||||
self.assertTrue(os.path.exists(filename))
|
||||
|
||||
@@ -108,7 +108,14 @@ class TestWarmStart(unittest.TestCase):
|
||||
|
||||
def test_FLAML_sample_size_in_starting_points(self):
|
||||
from minio.error import ServerError
|
||||
from openml.exceptions import OpenMLServerException
|
||||
|
||||
try:
|
||||
from openml.exceptions import OpenMLServerException
|
||||
except ImportError:
|
||||
|
||||
class OpenMLServerException(Exception):
|
||||
pass
|
||||
|
||||
from requests.exceptions import ChunkedEncodingError, SSLError
|
||||
|
||||
from flaml import AutoML
|
||||
|
||||
BIN
test/cal_housing_py3.pkz
Normal file
BIN
test/cal_housing_py3.pkz
Normal file
Binary file not shown.
60
test/check_dependency.py
Normal file
60
test/check_dependency.py
Normal file
@@ -0,0 +1,60 @@
|
||||
import subprocess
|
||||
from importlib.metadata import distributions
|
||||
|
||||
installed_libs = sorted(f"{dist.metadata['Name']}=={dist.version}" for dist in distributions())
|
||||
|
||||
first_tier_dependencies = [
|
||||
"numpy",
|
||||
"jupyter",
|
||||
"lightgbm",
|
||||
"xgboost",
|
||||
"scipy",
|
||||
"pandas",
|
||||
"scikit-learn",
|
||||
"thop",
|
||||
"pytest",
|
||||
"pytest-rerunfailures",
|
||||
"coverage",
|
||||
"pre-commit",
|
||||
"torch",
|
||||
"torchvision",
|
||||
"catboost",
|
||||
"rgf-python",
|
||||
"optuna",
|
||||
"openml",
|
||||
"statsmodels",
|
||||
"psutil",
|
||||
"dataclasses",
|
||||
"transformers[torch]",
|
||||
"transformers",
|
||||
"datasets",
|
||||
"evaluate",
|
||||
"nltk",
|
||||
"rouge_score",
|
||||
"hcrystalball",
|
||||
"seqeval",
|
||||
"pytorch-forecasting",
|
||||
"mlflow-skinny",
|
||||
"joblibspark",
|
||||
"joblib",
|
||||
"nbconvert",
|
||||
"nbformat",
|
||||
"ipykernel",
|
||||
"pytorch-lightning",
|
||||
"tensorboardX",
|
||||
"requests",
|
||||
"packaging",
|
||||
"dill",
|
||||
"ray",
|
||||
"prophet",
|
||||
]
|
||||
|
||||
|
||||
for lib in installed_libs:
|
||||
lib_name = lib.split("==")[0]
|
||||
if lib_name in first_tier_dependencies:
|
||||
print(lib)
|
||||
|
||||
# print current commit hash
|
||||
commit_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip()
|
||||
print(f"Current commit hash: {commit_hash}")
|
||||
@@ -2,11 +2,24 @@ from typing import Any, Dict, List, Union
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from catboost import CatBoostClassifier, CatBoostRegressor, Pool
|
||||
import pytest
|
||||
from sklearn.metrics import f1_score, r2_score
|
||||
|
||||
try:
|
||||
from catboost import CatBoostClassifier, CatBoostRegressor, Pool
|
||||
except ImportError: # pragma: no cover
|
||||
CatBoostClassifier = None
|
||||
CatBoostRegressor = None
|
||||
Pool = None
|
||||
|
||||
def evaluate_cv_folds_with_underlying_model(X_train_all, y_train_all, kf, model: Any, task: str) -> pd.DataFrame:
|
||||
|
||||
def _is_catboost_model_type(model_type: type) -> bool:
|
||||
if CatBoostClassifier is not None and CatBoostRegressor is not None:
|
||||
return model_type is CatBoostClassifier or model_type is CatBoostRegressor
|
||||
return getattr(model_type, "__module__", "").startswith("catboost")
|
||||
|
||||
|
||||
def evaluate_cv_folds_with_underlying_model(X_train_all, y_train_all, kf, model: Any, task: str) -> List[float]:
|
||||
"""Mimic the FLAML CV process to calculate the metrics across each fold.
|
||||
|
||||
:param X_train_all: X training data
|
||||
@@ -17,7 +30,7 @@ def evaluate_cv_folds_with_underlying_model(X_train_all, y_train_all, kf, model:
|
||||
:return: An array containing the metrics
|
||||
"""
|
||||
rng = np.random.RandomState(2020)
|
||||
all_fold_metrics: List[Dict[str, Union[int, float]]] = []
|
||||
all_fold_metrics: List[float] = []
|
||||
for train_index, val_index in kf.split(X_train_all, y_train_all):
|
||||
X_train_split, y_train_split = X_train_all, y_train_all
|
||||
train_index = rng.permutation(train_index)
|
||||
@@ -25,9 +38,11 @@ def evaluate_cv_folds_with_underlying_model(X_train_all, y_train_all, kf, model:
|
||||
X_val = X_train_split.iloc[val_index]
|
||||
y_train, y_val = y_train_split[train_index], y_train_split[val_index]
|
||||
model_type = type(model)
|
||||
if model_type is not CatBoostClassifier and model_type is not CatBoostRegressor:
|
||||
if not _is_catboost_model_type(model_type):
|
||||
model.fit(X_train, y_train)
|
||||
else:
|
||||
if Pool is None:
|
||||
pytest.skip("catboost is not installed")
|
||||
use_best_model = True
|
||||
n = max(int(len(y_train) * 0.9), len(y_train) - 1000) if use_best_model else len(y_train)
|
||||
X_tr, y_tr = (X_train)[:n], y_train[:n]
|
||||
@@ -38,5 +53,5 @@ def evaluate_cv_folds_with_underlying_model(X_train_all, y_train_all, kf, model:
|
||||
reproduced_metric = 1 - f1_score(y_val, y_pred_classes)
|
||||
else:
|
||||
reproduced_metric = 1 - r2_score(y_val, y_pred_classes)
|
||||
all_fold_metrics.append(reproduced_metric)
|
||||
all_fold_metrics.append(float(reproduced_metric))
|
||||
return all_fold_metrics
|
||||
|
||||
@@ -60,7 +60,7 @@ def test_housing(as_frame=True):
|
||||
"starting_points": "data",
|
||||
"max_iter": 0,
|
||||
}
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=as_frame)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=as_frame, data_home="test")
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
|
||||
|
||||
@@ -115,7 +115,7 @@ def test_suggest_classification():
|
||||
|
||||
def test_suggest_regression():
|
||||
location = "test/default"
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
suggested = suggest_hyperparams("regression", X_train, y_train, "lgbm", location=location)
|
||||
print(suggested)
|
||||
suggested = preprocess_and_suggest_hyperparams("regression", X_train, y_train, "xgboost", location=location)
|
||||
@@ -137,7 +137,7 @@ def test_rf():
|
||||
print(rf)
|
||||
|
||||
location = "test/default"
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
rf = RandomForestRegressor(default_location=location)
|
||||
rf.fit(X_train[:100], y_train[:100])
|
||||
rf.predict(X_train)
|
||||
@@ -155,7 +155,7 @@ def test_extratrees():
|
||||
print(classifier)
|
||||
|
||||
location = "test/default"
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
regressor = ExtraTreesRegressor(default_location=location)
|
||||
regressor.fit(X_train[:100], y_train[:100])
|
||||
regressor.predict(X_train)
|
||||
@@ -175,7 +175,7 @@ def test_lgbm():
|
||||
print(classifier.classes_)
|
||||
|
||||
location = "test/default"
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
regressor = LGBMRegressor(default_location=location)
|
||||
regressor.fit(X_train, y_train)
|
||||
regressor.predict(X_train)
|
||||
@@ -194,7 +194,7 @@ def test_xgboost():
|
||||
print(classifier.classes_)
|
||||
|
||||
location = "test/default"
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
regressor = XGBRegressor(default_location=location)
|
||||
regressor.fit(X_train[:100], y_train[:100])
|
||||
regressor.predict(X_train)
|
||||
|
||||
@@ -24,6 +24,8 @@ model_path_list = [
|
||||
if sys.platform.startswith("darwin") and sys.version_info[0] == 3 and sys.version_info[1] == 11:
|
||||
pytest.skip("skipping Python 3.11 on MacOS", allow_module_level=True)
|
||||
|
||||
pytestmark = pytest.mark.spark # set to spark as parallel testing raised RuntimeError
|
||||
|
||||
|
||||
def test_switch_1_1():
|
||||
data_idx, model_path_idx = 0, 0
|
||||
|
||||
@@ -5,6 +5,8 @@ import sys
|
||||
import pytest
|
||||
from utils import get_automl_settings, get_toy_data_seqclassification
|
||||
|
||||
pytestmark = pytest.mark.spark # set to spark as parallel testing raised MlflowException of changing parameter
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.platform in ["darwin", "win32"], reason="do not run on mac os or windows")
|
||||
def test_cv():
|
||||
|
||||
@@ -10,6 +10,10 @@ from flaml.default import portfolio
|
||||
if sys.platform.startswith("darwin") and sys.version_info[0] == 3 and sys.version_info[1] == 11:
|
||||
pytest.skip("skipping Python 3.11 on MacOS", allow_module_level=True)
|
||||
|
||||
pytestmark = (
|
||||
pytest.mark.spark
|
||||
) # set to spark as parallel testing raised ValueError: Feature NonExisting not implemented.
|
||||
|
||||
|
||||
def pop_args(fit_kwargs):
|
||||
fit_kwargs.pop("max_iter", None)
|
||||
@@ -26,21 +30,33 @@ def test_build_portfolio(path="./test/nlp/default", strategy="greedy"):
|
||||
|
||||
@pytest.mark.skipif(sys.platform == "win32", reason="do not run on windows")
|
||||
def test_starting_point_not_in_search_space():
|
||||
from flaml import AutoML
|
||||
"""Regression test for invalid starting points and custom_hp.
|
||||
|
||||
This test must not require network access to Hugging Face.
|
||||
"""
|
||||
|
||||
"""
|
||||
test starting_points located outside of the search space, and custom_hp is not set
|
||||
"""
|
||||
from flaml.automl.state import SearchState
|
||||
from flaml.automl.task.factory import task_factory
|
||||
|
||||
this_estimator_name = "transformer"
|
||||
X_train, y_train, X_val, y_val, _ = get_toy_data_seqclassification()
|
||||
X_train, y_train, _, _, _ = get_toy_data_seqclassification()
|
||||
task = task_factory("seq-classification", X_train, y_train)
|
||||
estimator_class = task.estimator_class_from_str(this_estimator_name)
|
||||
estimator_class.init()
|
||||
|
||||
automl = AutoML()
|
||||
automl_settings = get_automl_settings(estimator_name=this_estimator_name)
|
||||
|
||||
automl_settings["starting_points"] = {this_estimator_name: [{"learning_rate": 2e-3}]}
|
||||
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
assert automl._search_states[this_estimator_name].init_config[0]["learning_rate"] != 2e-3
|
||||
# SearchState is where invalid starting points are filtered out when max_iter > 1.
|
||||
search_state = SearchState(
|
||||
learner_class=estimator_class,
|
||||
data=X_train,
|
||||
task=task,
|
||||
starting_point={"learning_rate": 2e-3},
|
||||
max_iter=3,
|
||||
budget=10,
|
||||
)
|
||||
assert search_state.init_config and search_state.init_config[0].get("learning_rate") != 2e-3
|
||||
|
||||
"""
|
||||
test starting_points located outside of the search space, and custom_hp is set
|
||||
@@ -48,39 +64,60 @@ def test_starting_point_not_in_search_space():
|
||||
|
||||
from flaml import tune
|
||||
|
||||
X_train, y_train, X_val, y_val, _ = get_toy_data_seqclassification()
|
||||
X_train, y_train, _, _, _ = get_toy_data_seqclassification()
|
||||
|
||||
this_estimator_name = "transformer_ms"
|
||||
automl = AutoML()
|
||||
automl_settings = get_automl_settings(estimator_name=this_estimator_name)
|
||||
task = task_factory("seq-classification", X_train, y_train)
|
||||
estimator_class = task.estimator_class_from_str(this_estimator_name)
|
||||
estimator_class.init()
|
||||
|
||||
automl_settings["custom_hp"] = {
|
||||
this_estimator_name: {
|
||||
"model_path": {
|
||||
"domain": "albert-base-v2",
|
||||
},
|
||||
"learning_rate": {
|
||||
"domain": tune.choice([1e-4, 1e-5]),
|
||||
},
|
||||
"per_device_train_batch_size": {
|
||||
"domain": 2,
|
||||
},
|
||||
}
|
||||
custom_hp = {
|
||||
"model_path": {
|
||||
"domain": "albert-base-v2",
|
||||
},
|
||||
"learning_rate": {
|
||||
"domain": tune.choice([1e-4, 1e-5]),
|
||||
},
|
||||
"per_device_train_batch_size": {
|
||||
"domain": 2,
|
||||
},
|
||||
}
|
||||
automl_settings["starting_points"] = "data:test/nlp/default/"
|
||||
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
assert len(automl._search_states[this_estimator_name].init_config[0]) == len(
|
||||
automl._search_states[this_estimator_name]._search_space_domain
|
||||
) - len(automl_settings["custom_hp"][this_estimator_name]), (
|
||||
# Simulate a suggested starting point (e.g. from portfolio) which becomes invalid
|
||||
# after custom_hp constrains the space.
|
||||
invalid_starting_points = [
|
||||
{
|
||||
"learning_rate": 1e-5,
|
||||
"num_train_epochs": 1.0,
|
||||
"per_device_train_batch_size": 8,
|
||||
"seed": 43,
|
||||
"global_max_steps": 100,
|
||||
"model_path": "google/electra-base-discriminator",
|
||||
}
|
||||
]
|
||||
|
||||
search_state = SearchState(
|
||||
learner_class=estimator_class,
|
||||
data=X_train,
|
||||
task=task,
|
||||
starting_point=invalid_starting_points,
|
||||
custom_hp=custom_hp,
|
||||
max_iter=3,
|
||||
budget=10,
|
||||
)
|
||||
|
||||
assert search_state.init_config, "Expected a non-empty init_config list"
|
||||
init_config0 = search_state.init_config[0]
|
||||
assert init_config0 is not None
|
||||
assert len(init_config0) == len(search_state._search_space_domain) - len(custom_hp), (
|
||||
"The search space is updated with the custom_hp on {} hyperparameters of "
|
||||
"the specified estimator without an initial value. Thus a valid init config "
|
||||
"should only contain the cardinality of the search space minus {}".format(
|
||||
len(automl_settings["custom_hp"][this_estimator_name]),
|
||||
len(automl_settings["custom_hp"][this_estimator_name]),
|
||||
len(custom_hp),
|
||||
len(custom_hp),
|
||||
)
|
||||
)
|
||||
assert automl._search_states[this_estimator_name].search_space["model_path"] == "albert-base-v2"
|
||||
assert search_state.search_space["model_path"] == "albert-base-v2"
|
||||
|
||||
if os.path.exists("test/data/output/"):
|
||||
try:
|
||||
@@ -102,7 +139,13 @@ def test_points_to_evaluate():
|
||||
|
||||
automl_settings["custom_hp"] = {"transformer_ms": {"model_path": {"domain": "google/electra-small-discriminator"}}}
|
||||
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
try:
|
||||
automl.fit(X_train, y_train, **automl_settings)
|
||||
except OSError as e:
|
||||
message = str(e)
|
||||
if "Too Many Requests" in message or "rate limit" in message.lower():
|
||||
pytest.skip(f"Skipping HF model load/training: {message}")
|
||||
raise
|
||||
|
||||
if os.path.exists("test/data/output/"):
|
||||
try:
|
||||
@@ -137,7 +180,14 @@ def test_zero_shot_nomodel():
|
||||
fit_kwargs = automl_settings.pop("fit_kwargs_by_estimator", {}).get(estimator_name)
|
||||
fit_kwargs.update(automl_settings)
|
||||
pop_args(fit_kwargs)
|
||||
model.fit(X_train, y_train, **fit_kwargs)
|
||||
|
||||
try:
|
||||
model.fit(X_train, y_train, **fit_kwargs)
|
||||
except OSError as e:
|
||||
message = str(e)
|
||||
if "Too Many Requests" in message or "rate limit" in message.lower():
|
||||
pytest.skip(f"Skipping HF model load/training: {message}")
|
||||
raise
|
||||
|
||||
if os.path.exists("test/data/output/"):
|
||||
try:
|
||||
|
||||
@@ -7,7 +7,7 @@ from sklearn.model_selection import train_test_split
|
||||
from flaml import tune
|
||||
from flaml.automl.model import LGBMEstimator
|
||||
|
||||
data = fetch_california_housing(return_X_y=False, as_frame=True)
|
||||
data = fetch_california_housing(return_X_y=False, as_frame=True, data_home="test")
|
||||
X, y = data.data, data.target
|
||||
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
|
||||
X_train_ref = ray.put(X_train)
|
||||
|
||||
@@ -11,7 +11,7 @@ automl_settings = {
|
||||
"task": "regression",
|
||||
"log_file_name": "test/california.log",
|
||||
}
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
# Train with labeled input data
|
||||
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
|
||||
print(automl.model)
|
||||
|
||||
@@ -3,11 +3,13 @@ import sys
|
||||
import warnings
|
||||
|
||||
import mlflow
|
||||
import numpy as np
|
||||
import pytest
|
||||
import sklearn.datasets as skds
|
||||
from packaging.version import Version
|
||||
|
||||
from flaml import AutoML
|
||||
from flaml.automl.data import auto_convert_dtypes_pandas, auto_convert_dtypes_spark, get_random_dataframe
|
||||
from flaml.tune.spark.utils import check_spark
|
||||
|
||||
warnings.simplefilter(action="ignore")
|
||||
@@ -58,7 +60,7 @@ if sys.version_info >= (3, 11):
|
||||
else:
|
||||
skip_py311 = False
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
|
||||
def _test_spark_synapseml_lightgbm(spark=None, task="classification"):
|
||||
@@ -296,11 +298,88 @@ def _test_spark_large_df():
|
||||
print("time cost in minutes: ", (end_time - start_time) / 60)
|
||||
|
||||
|
||||
def test_get_random_dataframe():
|
||||
# Test with default parameters
|
||||
df = get_random_dataframe(n_rows=50, ratio_none=0.2, seed=123)
|
||||
assert df.shape == (50, 14) # Default is 200 rows and 14 columns
|
||||
|
||||
# Test column types
|
||||
assert "timestamp" in df.columns and np.issubdtype(df["timestamp"].dtype, np.datetime64)
|
||||
assert "id" in df.columns and np.issubdtype(df["id"].dtype, np.integer)
|
||||
assert "score" in df.columns and np.issubdtype(df["score"].dtype, np.floating)
|
||||
assert "category" in df.columns and df["category"].dtype.name == "category"
|
||||
|
||||
|
||||
def test_auto_convert_dtypes_pandas():
|
||||
# Create a test DataFrame with various types
|
||||
import pandas as pd
|
||||
|
||||
test_df = pd.DataFrame(
|
||||
{
|
||||
"int_col": ["1", "2", "3", "4", "5", "6", "6"],
|
||||
"float_col": ["1.1", "2.2", "3.3", "NULL", "5.5", "6.6", "6.6"],
|
||||
"date_col": ["2021-01-01", "2021-02-01", "NA", "2021-04-01", "2021-05-01", "2021-06-01", "2021-06-01"],
|
||||
"cat_col": ["A", "B", "A", "A", "B", "A", "B"],
|
||||
"string_col": ["text1", "text2", "text3", "text4", "text5", "text6", "text7"],
|
||||
}
|
||||
)
|
||||
|
||||
# Convert dtypes
|
||||
converted_df, schema = auto_convert_dtypes_pandas(test_df)
|
||||
|
||||
# Check conversions
|
||||
assert schema["int_col"] == "int"
|
||||
assert schema["float_col"] == "double"
|
||||
assert schema["date_col"] == "timestamp"
|
||||
assert schema["cat_col"] == "category"
|
||||
assert schema["string_col"] == "string"
|
||||
|
||||
|
||||
def test_auto_convert_dtypes_spark():
|
||||
"""Test auto_convert_dtypes_spark function with various data types."""
|
||||
import pandas as pd
|
||||
|
||||
# Create a test DataFrame with various types
|
||||
test_pdf = pd.DataFrame(
|
||||
{
|
||||
"int_col": ["1", "2", "3", "4", "NA"],
|
||||
"float_col": ["1.1", "2.2", "3.3", "NULL", "5.5"],
|
||||
"date_col": ["2021-01-01", "2021-02-01", "NA", "2021-04-01", "2021-05-01"],
|
||||
"cat_col": ["A", "B", "A", "C", "B"],
|
||||
"string_col": ["text1", "text2", "text3", "text4", "text5"],
|
||||
}
|
||||
)
|
||||
|
||||
# Convert pandas DataFrame to Spark DataFrame
|
||||
test_df = spark.createDataFrame(test_pdf)
|
||||
|
||||
# Convert dtypes
|
||||
converted_df, schema = auto_convert_dtypes_spark(test_df)
|
||||
|
||||
# Check conversions
|
||||
assert schema["int_col"] == "int"
|
||||
assert schema["float_col"] == "double"
|
||||
assert schema["date_col"] == "timestamp"
|
||||
assert schema["cat_col"] == "string" # Conceptual category in schema
|
||||
assert schema["string_col"] == "string"
|
||||
|
||||
# Verify the actual data types from the Spark DataFrame
|
||||
spark_dtypes = dict(converted_df.dtypes)
|
||||
assert spark_dtypes["int_col"] == "int"
|
||||
assert spark_dtypes["float_col"] == "double"
|
||||
assert spark_dtypes["date_col"] == "timestamp"
|
||||
assert spark_dtypes["cat_col"] == "string" # In Spark, categories are still strings
|
||||
assert spark_dtypes["string_col"] == "string"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_spark_synapseml_classification()
|
||||
test_spark_synapseml_regression()
|
||||
test_spark_synapseml_rank()
|
||||
test_spark_input_df()
|
||||
test_get_random_dataframe()
|
||||
test_auto_convert_dtypes_pandas()
|
||||
test_auto_convert_dtypes_spark()
|
||||
|
||||
# import cProfile
|
||||
# import pstats
|
||||
|
||||
@@ -25,7 +25,7 @@ os.environ["FLAML_MAX_CONCURRENT"] = "2"
|
||||
spark_available, _ = check_spark()
|
||||
skip_spark = not spark_available
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
|
||||
def test_parallel_xgboost(hpo_method=None, data_size=1000):
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import os
|
||||
import unittest
|
||||
|
||||
import pytest
|
||||
from sklearn.datasets import load_wine
|
||||
|
||||
from flaml import AutoML
|
||||
@@ -24,6 +25,8 @@ if os.path.exists(os.path.join(os.getcwd(), "test", "spark", "custom_mylearner.p
|
||||
else:
|
||||
skip_my_learner = True
|
||||
|
||||
pytestmark = pytest.mark.spark
|
||||
|
||||
|
||||
class TestEnsemble(unittest.TestCase):
|
||||
def setUp(self) -> None:
|
||||
|
||||
@@ -9,7 +9,7 @@ from flaml.tune.spark.utils import check_spark
|
||||
spark_available, _ = check_spark()
|
||||
skip_spark = not spark_available
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
os.environ["FLAML_MAX_CONCURRENT"] = "2"
|
||||
|
||||
@@ -22,7 +22,7 @@ def base_automl(n_concurrent_trials=1, use_ray=False, use_spark=False, verbose=0
|
||||
except (ServerError, Exception):
|
||||
from sklearn.datasets import fetch_california_housing
|
||||
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True)
|
||||
X_train, y_train = fetch_california_housing(return_X_y=True, data_home="test")
|
||||
automl = AutoML()
|
||||
settings = {
|
||||
"time_budget": 3, # total running time in seconds
|
||||
|
||||
@@ -21,6 +21,7 @@ try:
|
||||
from pyspark.ml.feature import VectorAssembler
|
||||
except ImportError:
|
||||
pass
|
||||
pytestmark = pytest.mark.spark
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
skip_spark = importlib.util.find_spec("pyspark") is None
|
||||
|
||||
@@ -2,6 +2,7 @@ import os
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
import scipy.sparse
|
||||
from sklearn.datasets import load_iris, load_wine
|
||||
|
||||
@@ -12,6 +13,7 @@ from flaml.tune.spark.utils import check_spark
|
||||
|
||||
spark_available, _ = check_spark()
|
||||
skip_spark = not spark_available
|
||||
pytestmark = pytest.mark.spark
|
||||
|
||||
os.environ["FLAML_MAX_CONCURRENT"] = "2"
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ from flaml.tune.spark.utils import check_spark
|
||||
spark_available, _ = check_spark()
|
||||
skip_spark = not spark_available
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
os.environ["FLAML_MAX_CONCURRENT"] = "2"
|
||||
|
||||
@@ -25,7 +25,7 @@ try:
|
||||
except ImportError:
|
||||
skip_spark = True
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
|
||||
def test_overtime():
|
||||
|
||||
@@ -2,8 +2,23 @@ import os
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
from minio.error import ServerError
|
||||
from openml.exceptions import OpenMLServerException
|
||||
|
||||
try:
|
||||
from minio.error import ServerError
|
||||
except ImportError:
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
try:
|
||||
from openml.exceptions import OpenMLServerException
|
||||
except ImportError:
|
||||
|
||||
class OpenMLServerException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
from requests.exceptions import ChunkedEncodingError, SSLError
|
||||
|
||||
from flaml.tune.spark.utils import check_spark
|
||||
@@ -11,7 +26,7 @@ from flaml.tune.spark.utils import check_spark
|
||||
spark_available, _ = check_spark()
|
||||
skip_spark = not spark_available
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
os.environ["FLAML_MAX_CONCURRENT"] = "2"
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ from flaml.tune.spark.utils import check_spark
|
||||
spark_available, _ = check_spark()
|
||||
skip_spark = not spark_available
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
os.environ["FLAML_MAX_CONCURRENT"] = "2"
|
||||
X, y = load_breast_cancer(return_X_y=True)
|
||||
|
||||
@@ -36,7 +36,7 @@ except ImportError:
|
||||
print("Spark is not installed. Skip all spark tests.")
|
||||
skip_spark = True
|
||||
|
||||
pytestmark = pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests.")
|
||||
pytestmark = [pytest.mark.skipif(skip_spark, reason="Spark is not installed. Skip all spark tests."), pytest.mark.spark]
|
||||
|
||||
|
||||
def test_with_parameters_spark():
|
||||
|
||||
@@ -5,17 +5,38 @@ import sys
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
import openml
|
||||
|
||||
try:
|
||||
import openml
|
||||
except ImportError:
|
||||
openml = None
|
||||
import pandas as pd
|
||||
import pytest
|
||||
import scipy.sparse
|
||||
from minio.error import ServerError
|
||||
|
||||
try:
|
||||
from minio.error import ServerError
|
||||
except ImportError:
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
from requests.exceptions import SSLError
|
||||
from sklearn.metrics import mean_absolute_error, mean_squared_error
|
||||
|
||||
from flaml import AutoVW
|
||||
from flaml.tune import loguniform, polynomial_expansion_set
|
||||
|
||||
try:
|
||||
from vowpalwabbit import pyvw
|
||||
except ImportError:
|
||||
skip_vw_test = True
|
||||
else:
|
||||
skip_vw_test = False
|
||||
|
||||
pytest.skip("skipping if no openml", allow_module_level=True) if openml is None else None
|
||||
|
||||
VW_DS_DIR = "test/data/"
|
||||
NS_LIST = list(string.ascii_lowercase) + list(string.ascii_uppercase)
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -351,14 +372,9 @@ def get_vw_tuning_problem(tuning_hp="NamesapceInteraction"):
|
||||
return vw_oml_problem_args, vw_online_aml_problem
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
"3.10" in sys.version or "3.11" in sys.version,
|
||||
reason="do not run on py >= 3.10",
|
||||
)
|
||||
@pytest.mark.skipif(skip_vw_test, reason="vowpalwabbit not installed")
|
||||
class TestAutoVW(unittest.TestCase):
|
||||
def test_vw_oml_problem_and_vanilla_vw(self):
|
||||
from vowpalwabbit import pyvw
|
||||
|
||||
try:
|
||||
vw_oml_problem_args, vw_online_aml_problem = get_vw_tuning_problem()
|
||||
except (SSLError, ServerError, Exception) as e:
|
||||
|
||||
@@ -59,6 +59,17 @@ def _test_hf_data():
|
||||
except requests.exceptions.ConnectionError:
|
||||
return
|
||||
|
||||
# Tests will only run if there is a GPU available
|
||||
try:
|
||||
import ray
|
||||
|
||||
pg = ray.util.placement_group([{"CPU": 1, "GPU": 1}])
|
||||
|
||||
if not pg.wait(timeout_seconds=10): # Wait 10 seconds for resources
|
||||
raise RuntimeError("No available node types can fulfill resource request!")
|
||||
except RuntimeError:
|
||||
return
|
||||
|
||||
custom_sent_keys = ["sentence1", "sentence2"]
|
||||
label_key = "label"
|
||||
|
||||
|
||||
@@ -6,12 +6,12 @@ from sklearn.model_selection import train_test_split
|
||||
from flaml import tune
|
||||
from flaml.automl.model import LGBMEstimator
|
||||
|
||||
data = fetch_california_housing(return_X_y=False, as_frame=True)
|
||||
data = fetch_california_housing(return_X_y=False, as_frame=True, data_home="test")
|
||||
df, X, y = data.frame, data.data, data.target
|
||||
df_train, _, X_train, X_test, _, y_test = train_test_split(df, X, y, test_size=0.33, random_state=42)
|
||||
csv_file_name = "test/housing.csv"
|
||||
df_train.to_csv(csv_file_name, index=False)
|
||||
# X, y = fetch_california_housing(return_X_y=True, as_frame=True)
|
||||
# X, y = fetch_california_housing(return_X_y=True, as_frame=True, data_home="test")
|
||||
# X_train, X_test, y_train, y_test = train_test_split(
|
||||
# X, y, test_size=0.33, random_state=42
|
||||
# )
|
||||
|
||||
@@ -80,11 +80,38 @@ from flaml import AutoML
|
||||
from sklearn.datasets import load_iris
|
||||
|
||||
X, y = load_iris(return_X_y=True)
|
||||
|
||||
automl = AutoML(settings={"time_budget": 3})
|
||||
settings = {"time_budget": 3}
|
||||
automl = AutoML(**settings)
|
||||
automl.fit(X, y)
|
||||
|
||||
print(f"{automl.best_estimator=}")
|
||||
print(f"{automl.best_config=}")
|
||||
print(f"params for best estimator: {automl.model.config2params(automl.best_config)}")
|
||||
```
|
||||
|
||||
If the automl instance is not accessible and you've the `best_config`. You can also convert it with below code:
|
||||
|
||||
```python
|
||||
from flaml.automl.task.factory import task_factory
|
||||
|
||||
task = "classification"
|
||||
best_estimator = "rf"
|
||||
best_config = {
|
||||
"n_estimators": 15,
|
||||
"max_features": 0.35807183923834934,
|
||||
"max_leaves": 12,
|
||||
"criterion": "gini",
|
||||
}
|
||||
|
||||
model_class = task_factory(task).estimator_class_from_str(best_estimator)(task=task)
|
||||
best_params = model_class.config2params(best_config)
|
||||
```
|
||||
|
||||
Then you can use it to train the sklearn estimators directly:
|
||||
|
||||
```python
|
||||
from sklearn.ensemble import RandomForestClassifier
|
||||
|
||||
model = RandomForestClassifier(**best_params)
|
||||
model.fit(X, y)
|
||||
```
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
"name": "website",
|
||||
"version": "0.0.0",
|
||||
"private": true,
|
||||
"resolutions" :{
|
||||
"nth-check":"2.0.1",
|
||||
"trim":"0.0.3",
|
||||
"resolutions": {
|
||||
"nth-check": "2.0.1",
|
||||
"trim": "0.0.3",
|
||||
"got": "11.8.5",
|
||||
"node-forge": "1.3.0",
|
||||
"minimatch": "3.0.5",
|
||||
@@ -12,7 +12,7 @@
|
||||
"eta": "2.0.0",
|
||||
"@sideway/formula": "3.0.1",
|
||||
"http-cache-semantics": "4.1.1"
|
||||
},
|
||||
},
|
||||
"scripts": {
|
||||
"docusaurus": "docusaurus",
|
||||
"start": "docusaurus start",
|
||||
@@ -33,13 +33,13 @@
|
||||
"clsx": "^1.1.1",
|
||||
"file-loader": "^6.2.0",
|
||||
"hast-util-is-element": "1.1.0",
|
||||
"minimatch": "3.0.5",
|
||||
"react": "^17.0.1",
|
||||
"react-dom": "^17.0.1",
|
||||
"rehype-katex": "4",
|
||||
"remark-math": "3",
|
||||
"trim": "^0.0.3",
|
||||
"url-loader": "^4.1.1",
|
||||
"minimatch": "3.0.5"
|
||||
"url-loader": "^4.1.1"
|
||||
},
|
||||
"browserslist": {
|
||||
"production": [
|
||||
|
||||
@@ -153,6 +153,15 @@
|
||||
"@babel/highlight" "^7.23.4"
|
||||
chalk "^2.4.2"
|
||||
|
||||
"@babel/code-frame@^7.26.2":
|
||||
version "7.26.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85"
|
||||
integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==
|
||||
dependencies:
|
||||
"@babel/helper-validator-identifier" "^7.25.9"
|
||||
js-tokens "^4.0.0"
|
||||
picocolors "^1.0.0"
|
||||
|
||||
"@babel/compat-data@^7.17.7", "@babel/compat-data@^7.20.0", "@babel/compat-data@^7.20.1":
|
||||
version "7.20.1"
|
||||
resolved "https://registry.npmmirror.com/@babel/compat-data/-/compat-data-7.20.1.tgz#f2e6ef7790d8c8dbf03d379502dcc246dcce0b30"
|
||||
@@ -429,6 +438,11 @@
|
||||
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz#9478c707febcbbe1ddb38a3d91a2e054ae622d83"
|
||||
integrity sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==
|
||||
|
||||
"@babel/helper-string-parser@^7.25.9":
|
||||
version "7.25.9"
|
||||
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c"
|
||||
integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==
|
||||
|
||||
"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1":
|
||||
version "7.19.1"
|
||||
resolved "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2"
|
||||
@@ -439,6 +453,11 @@
|
||||
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0"
|
||||
integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==
|
||||
|
||||
"@babel/helper-validator-identifier@^7.25.9":
|
||||
version "7.25.9"
|
||||
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7"
|
||||
integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==
|
||||
|
||||
"@babel/helper-validator-option@^7.18.6":
|
||||
version "7.18.6"
|
||||
resolved "https://registry.npmmirror.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz#bf0d2b5a509b1f336099e4ff36e1a63aa5db4db8"
|
||||
@@ -455,13 +474,12 @@
|
||||
"@babel/types" "^7.19.0"
|
||||
|
||||
"@babel/helpers@^7.12.5", "@babel/helpers@^7.20.1":
|
||||
version "7.20.1"
|
||||
resolved "https://registry.npmmirror.com/@babel/helpers/-/helpers-7.20.1.tgz#2ab7a0fcb0a03b5bf76629196ed63c2d7311f4c9"
|
||||
integrity sha512-J77mUVaDTUJFZ5BpP6mMn6OIl3rEWymk2ZxDBQJUG3P+PbmyMcF3bYWvz0ma69Af1oobDqT/iAsvzhB58xhQUg==
|
||||
version "7.26.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.10.tgz#6baea3cd62ec2d0c1068778d63cb1314f6637384"
|
||||
integrity sha512-UPYc3SauzZ3JGgj87GgZ89JVdC5dj0AoetR5Bw6wj4niittNyFh6+eOGonYvJ1ao6B8lEa3Q3klS7ADZ53bc5g==
|
||||
dependencies:
|
||||
"@babel/template" "^7.18.10"
|
||||
"@babel/traverse" "^7.20.1"
|
||||
"@babel/types" "^7.20.0"
|
||||
"@babel/template" "^7.26.9"
|
||||
"@babel/types" "^7.26.10"
|
||||
|
||||
"@babel/highlight@^7.18.6":
|
||||
version "7.18.6"
|
||||
@@ -491,6 +509,13 @@
|
||||
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.6.tgz#ba1c9e512bda72a47e285ae42aff9d2a635a9e3b"
|
||||
integrity sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==
|
||||
|
||||
"@babel/parser@^7.26.9":
|
||||
version "7.26.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.10.tgz#e9bdb82f14b97df6569b0b038edd436839c57749"
|
||||
integrity sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA==
|
||||
dependencies:
|
||||
"@babel/types" "^7.26.10"
|
||||
|
||||
"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.18.6":
|
||||
version "7.18.6"
|
||||
resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz#da5b8f9a580acdfbe53494dba45ea389fb09a4d2"
|
||||
@@ -1196,19 +1221,19 @@
|
||||
"@babel/plugin-transform-typescript" "^7.18.6"
|
||||
|
||||
"@babel/runtime-corejs3@^7.15.4":
|
||||
version "7.20.1"
|
||||
resolved "https://registry.npmmirror.com/@babel/runtime-corejs3/-/runtime-corejs3-7.20.1.tgz#d0775a49bb5fba77e42cbb7276c9955c7b05af8d"
|
||||
integrity sha512-CGulbEDcg/ND1Im7fUNRZdGXmX2MTWVVZacQi/6DiKE5HNwZ3aVTm5PV4lO8HHz0B2h8WQyvKKjbX5XgTtydsg==
|
||||
version "7.26.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.26.10.tgz#5a3185ca2813f8de8ae68622572086edf5cf51f2"
|
||||
integrity sha512-uITFQYO68pMEYR46AHgQoyBg7KPPJDAbGn4jUTIRgCFJIp88MIBUianVOplhZDEec07bp9zIyr4Kp0FCyQzmWg==
|
||||
dependencies:
|
||||
core-js-pure "^3.25.1"
|
||||
regenerator-runtime "^0.13.10"
|
||||
core-js-pure "^3.30.2"
|
||||
regenerator-runtime "^0.14.0"
|
||||
|
||||
"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.15.4", "@babel/runtime@^7.8.4":
|
||||
version "7.20.1"
|
||||
resolved "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.20.1.tgz#1148bb33ab252b165a06698fde7576092a78b4a9"
|
||||
integrity sha512-mrzLkl6U9YLF8qpqI7TB82PESyEGjm/0Ly91jG575eVxMMlb8fYfOXFZIJ8XfLrJZQbm7dlKry2bJmXBUEkdFg==
|
||||
version "7.26.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.10.tgz#a07b4d8fa27af131a633d7b3524db803eb4764c2"
|
||||
integrity sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw==
|
||||
dependencies:
|
||||
regenerator-runtime "^0.13.10"
|
||||
regenerator-runtime "^0.14.0"
|
||||
|
||||
"@babel/template@^7.12.7", "@babel/template@^7.18.10":
|
||||
version "7.18.10"
|
||||
@@ -1228,6 +1253,15 @@
|
||||
"@babel/parser" "^7.22.15"
|
||||
"@babel/types" "^7.22.15"
|
||||
|
||||
"@babel/template@^7.26.9":
|
||||
version "7.26.9"
|
||||
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.26.9.tgz#4577ad3ddf43d194528cff4e1fa6b232fa609bb2"
|
||||
integrity sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==
|
||||
dependencies:
|
||||
"@babel/code-frame" "^7.26.2"
|
||||
"@babel/parser" "^7.26.9"
|
||||
"@babel/types" "^7.26.9"
|
||||
|
||||
"@babel/traverse@^7.12.13", "@babel/traverse@^7.12.9", "@babel/traverse@^7.19.0", "@babel/traverse@^7.19.1", "@babel/traverse@^7.20.1":
|
||||
version "7.23.6"
|
||||
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.6.tgz#b53526a2367a0dd6edc423637f3d2d0f2521abc5"
|
||||
@@ -1262,6 +1296,14 @@
|
||||
"@babel/helper-validator-identifier" "^7.22.20"
|
||||
to-fast-properties "^2.0.0"
|
||||
|
||||
"@babel/types@^7.26.10", "@babel/types@^7.26.9":
|
||||
version "7.26.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.10.tgz#396382f6335bd4feb65741eacfc808218f859259"
|
||||
integrity sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ==
|
||||
dependencies:
|
||||
"@babel/helper-string-parser" "^7.25.9"
|
||||
"@babel/helper-validator-identifier" "^7.25.9"
|
||||
|
||||
"@docsearch/css@3.3.0":
|
||||
version "3.3.0"
|
||||
resolved "https://registry.npmmirror.com/@docsearch/css/-/css-3.3.0.tgz#d698e48302d12240d7c2f7452ccb2d2239a8cd80"
|
||||
@@ -2592,9 +2634,9 @@ ajv@^8.0.0, ajv@^8.8.0:
|
||||
uri-js "^4.2.2"
|
||||
|
||||
algoliasearch-helper@^3.5.5:
|
||||
version "3.11.1"
|
||||
resolved "https://registry.npmmirror.com/algoliasearch-helper/-/algoliasearch-helper-3.11.1.tgz#d83ab7f1a2a374440686ef7a144b3c288b01188a"
|
||||
integrity sha512-mvsPN3eK4E0bZG0/WlWJjeqe/bUD2KOEVOl0GyL/TGXn6wcpZU8NOuztGHCUKXkyg5gq6YzUakVTmnmSSO5Yiw==
|
||||
version "3.26.0"
|
||||
resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.26.0.tgz#d6e283396a9fc5bf944f365dc3b712570314363f"
|
||||
integrity sha512-Rv2x3GXleQ3ygwhkhJubhhYGsICmShLAiqtUuJTUkr9uOCOXyF2E71LVT4XDnVffbknv8XgScP4U0Oxtgm+hIw==
|
||||
dependencies:
|
||||
"@algolia/events" "^4.0.1"
|
||||
|
||||
@@ -2863,9 +2905,9 @@ boxen@^5.0.0, boxen@^5.0.1:
|
||||
wrap-ansi "^7.0.0"
|
||||
|
||||
brace-expansion@^1.1.7:
|
||||
version "1.1.11"
|
||||
resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
|
||||
integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
|
||||
version "1.1.12"
|
||||
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.12.tgz#ab9b454466e5a8cc3a187beaad580412a9c5b843"
|
||||
integrity sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==
|
||||
dependencies:
|
||||
balanced-match "^1.0.0"
|
||||
concat-map "0.0.1"
|
||||
@@ -2995,15 +3037,10 @@ caniuse-api@^3.0.0:
|
||||
lodash.memoize "^4.1.2"
|
||||
lodash.uniq "^4.5.0"
|
||||
|
||||
caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001400, caniuse-lite@^1.0.30001426:
|
||||
version "1.0.30001430"
|
||||
resolved "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001430.tgz#638a8ae00b5a8a97e66ff43733b2701f81b101fa"
|
||||
integrity sha512-IB1BXTZKPDVPM7cnV4iaKaHxckvdr/3xtctB3f7Hmenx3qYBhGtTZ//7EllK66aKXW98Lx0+7Yr0kxBtIt3tzg==
|
||||
|
||||
caniuse-lite@^1.0.30001646:
|
||||
version "1.0.30001657"
|
||||
resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001657.tgz#29fd504bffca719d1c6b63a1f6f840be1973a660"
|
||||
integrity sha512-DPbJAlP8/BAXy3IgiWmZKItubb3TYGP0WscQQlVGIfT4s/YlFYVuJgyOsQNP7rJRChx/qdMeLJQJP0Sgg2yjNA==
|
||||
caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001400, caniuse-lite@^1.0.30001426, caniuse-lite@^1.0.30001646:
|
||||
version "1.0.30001718"
|
||||
resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001718.tgz"
|
||||
integrity sha512-AflseV1ahcSunK53NfEs9gFWgOEmzr0f+kaMFA4xiLZlr9Hzt7HxcSpIFcnNCUkz6R6dWKa54rUz3HUmI3nVcw==
|
||||
|
||||
ccount@^1.0.0, ccount@^1.0.3:
|
||||
version "1.1.0"
|
||||
@@ -3326,10 +3363,10 @@ core-js-compat@^3.25.1:
|
||||
dependencies:
|
||||
browserslist "^4.21.4"
|
||||
|
||||
core-js-pure@^3.25.1:
|
||||
version "3.26.0"
|
||||
resolved "https://registry.npmmirror.com/core-js-pure/-/core-js-pure-3.26.0.tgz#7ad8a5dd7d910756f3124374b50026e23265ca9a"
|
||||
integrity sha512-LiN6fylpVBVwT8twhhluD9TzXmZQQsr2I2eIKtWNbZI1XMfBT7CV18itaN6RA7EtQd/SDdRx/wzvAShX2HvhQA==
|
||||
core-js-pure@^3.30.2:
|
||||
version "3.41.0"
|
||||
resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.41.0.tgz#349fecad168d60807a31e83c99d73d786fe80811"
|
||||
integrity sha512-71Gzp96T9YPk63aUvE5Q5qP+DryB4ZloUZPSOebGM88VNw8VNfvdA7z6kGA8iGOTEzAomsRidp4jXSmUIJsL+Q==
|
||||
|
||||
core-js@^3.18.0:
|
||||
version "3.26.0"
|
||||
@@ -3371,9 +3408,9 @@ cross-fetch@^3.1.5:
|
||||
node-fetch "2.6.7"
|
||||
|
||||
cross-spawn@^7.0.3:
|
||||
version "7.0.3"
|
||||
resolved "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
|
||||
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
|
||||
version "7.0.6"
|
||||
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
|
||||
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
|
||||
dependencies:
|
||||
path-key "^3.1.0"
|
||||
shebang-command "^2.0.0"
|
||||
@@ -4830,9 +4867,9 @@ http-parser-js@>=0.5.1:
|
||||
integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==
|
||||
|
||||
http-proxy-middleware@^2.0.3:
|
||||
version "2.0.7"
|
||||
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6"
|
||||
integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==
|
||||
version "2.0.9"
|
||||
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz#e9e63d68afaa4eee3d147f39149ab84c0c2815ef"
|
||||
integrity sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==
|
||||
dependencies:
|
||||
"@types/http-proxy" "^1.17.8"
|
||||
http-proxy "^1.18.1"
|
||||
@@ -5709,9 +5746,9 @@ multicast-dns@^7.2.5:
|
||||
thunky "^1.0.2"
|
||||
|
||||
nanoid@^3.3.6:
|
||||
version "3.3.6"
|
||||
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c"
|
||||
integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==
|
||||
version "3.3.8"
|
||||
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf"
|
||||
integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==
|
||||
|
||||
negotiator@0.6.3:
|
||||
version "0.6.3"
|
||||
@@ -6441,9 +6478,9 @@ prism-react-renderer@^1.2.1:
|
||||
integrity sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg==
|
||||
|
||||
prismjs@^1.23.0:
|
||||
version "1.29.0"
|
||||
resolved "https://registry.npmmirror.com/prismjs/-/prismjs-1.29.0.tgz#f113555a8fa9b57c35e637bba27509dcf802dd12"
|
||||
integrity sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==
|
||||
version "1.30.0"
|
||||
resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.30.0.tgz#d9709969d9d4e16403f6f348c63553b19f0975a9"
|
||||
integrity sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==
|
||||
|
||||
process-nextick-args@~2.0.0:
|
||||
version "2.0.1"
|
||||
@@ -6816,10 +6853,10 @@ regenerate@^1.4.2:
|
||||
resolved "https://registry.npmmirror.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a"
|
||||
integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==
|
||||
|
||||
regenerator-runtime@^0.13.10:
|
||||
version "0.13.10"
|
||||
resolved "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.13.10.tgz#ed07b19616bcbec5da6274ebc75ae95634bfc2ee"
|
||||
integrity sha512-KepLsg4dU12hryUO7bp/axHAKvwGOCV0sGloQtpagJ12ai+ojVDqkeGSiRX1zlq+kjIMZ1t7gpze+26QqtdGqw==
|
||||
regenerator-runtime@^0.14.0:
|
||||
version "0.14.1"
|
||||
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f"
|
||||
integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==
|
||||
|
||||
regenerator-transform@^0.15.0:
|
||||
version "0.15.0"
|
||||
@@ -7272,14 +7309,7 @@ send@0.19.0:
|
||||
range-parser "~1.2.1"
|
||||
statuses "2.0.1"
|
||||
|
||||
serialize-javascript@^6.0.0:
|
||||
version "6.0.0"
|
||||
resolved "https://registry.npmmirror.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8"
|
||||
integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==
|
||||
dependencies:
|
||||
randombytes "^2.1.0"
|
||||
|
||||
serialize-javascript@^6.0.1:
|
||||
serialize-javascript@^6.0.0, serialize-javascript@^6.0.1:
|
||||
version "6.0.2"
|
||||
resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.2.tgz#defa1e055c83bf6d59ea805d8da862254eb6a6c2"
|
||||
integrity sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==
|
||||
|
||||
Reference in New Issue
Block a user