Mirror of https://github.com/microsoft/FLAML.git (synced 2026-02-16 05:32:24 +08:00)
Compare commits
38 Commits
- 7729855f49
- 0fe284b21f
- 853c9501bc
- 8e63dd417b
- f27f98c6d7
- a68d073ccf
- 15fda2206b
- a9d7b7f971
- d24d2e0088
- 67f4048667
- d8129b9211
- 165d7467f9
- 3de0dc667e
- 6840dc2b09
- 1a9fa3ac23
- 325baa40a5
- 550d1cfe9b
- 249f0f1708
- b645da3ea7
- 0415638dd1
- 6b93c2e394
- a93bf39720
- dc8060a21b
- 30db685cee
- fda9fa0103
- 830ec4541c
- 46162578f8
- 8658e51182
- 868e7dd1ca
- 4886cb5689
- 599731cb22
- 0cb79dfdff
- f70df312f4
- 93b9e09166
- 3c6e191044
- 5f9b514be7
- 44932712c4
- f0731e2240
.github/PULL_REQUEST_TEMPLATE.md (1 change)

@@ -13,6 +13,7 @@
 ## Checks
 
+<!-- - I've used [pre-commit](https://microsoft.github.io/FLAML/docs/Contribute#pre-commit) to lint the changes in this PR (note the same is integrated in our CI checks). -->
 
 - [ ] I've included any doc changes needed for https://microsoft.github.io/FLAML/. See https://microsoft.github.io/FLAML/docs/Contribute#documentation to build and test documentation locally.
 - [ ] I've added tests (if relevant) corresponding to the changes introduced in this PR.
 - [ ] I've made sure all auto checks have passed.
.github/workflows/deploy-website.yml (3 changes)

@@ -17,6 +17,9 @@ on:
   merge_group:
     types: [checks_requested]
 
+permissions:
+  contents: write
+
 jobs:
   checks:
     if: github.event_name != 'push'
.github/workflows/openai.yml (2 changes)

@@ -13,6 +13,8 @@ on:
       - 'notebook/autogen_chatgpt_gpt4.ipynb'
       - '.github/workflows/openai.yml'
 
+permissions: {}
+
 jobs:
   test:
     strategy:
.github/workflows/pre-commit.yml (1 change)

@@ -10,6 +10,7 @@ defaults:
   run:
     shell: bash
 
+permissions: {}
 jobs:
 
   pre-commit-check:
.github/workflows/python-package.yml (35 changes)

@@ -17,6 +17,7 @@ on:
   merge_group:
     types: [checks_requested]
 
+permissions: {}
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

@@ -29,19 +30,17 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, macos-latest, windows-2019]
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
-      - name: On mac + python 3.10, install libomp to facilitate lgbm and xgboost install
-        if: matrix.os == 'macOS-latest' && matrix.python-version == '3.10'
+      - name: On mac, install libomp to facilitate lgbm and xgboost install
+        if: matrix.os == 'macOS-latest'
        run: |
-          # remove libomp version constraint after xgboost works with libomp>11.1.0 on python 3.10
-          wget https://raw.githubusercontent.com/Homebrew/homebrew-core/679923b4eb48a8dc7ecc1f05d06063cd79b3fc00/Formula/libomp.rb -O $(find $(brew --repository) -name libomp.rb)
-          brew unlink libomp
          brew update
          brew install libomp
          export CC=/usr/bin/clang
          export CXX=/usr/bin/clang++

@@ -60,24 +59,26 @@ jobs:
        run: |
          pip install pyspark==3.2.3
          pip list | grep "pyspark"
-      - name: If linux, install ray 2
-        if: matrix.os == 'ubuntu-latest'
+      - name: If linux and python<3.11, install ray 2
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version != '3.11'
        run: |
          pip install "ray[tune]<2.5.0"
-      - name: If mac, install ray
-        if: matrix.os == 'macOS-latest'
+      - name: If mac and python 3.10, install ray and xgboost 1
+        if: matrix.os == 'macOS-latest' && matrix.python-version == '3.10'
        run: |
          pip install -e .[ray]
-      - name: If linux or mac, install prophet on python < 3.9
-        if: (matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest') && matrix.python-version != '3.9' && matrix.python-version != '3.10'
+          # use macOS to test xgboost 1, but macOS also supports xgboost 2
+          pip install "xgboost<2"
+      - name: If linux, install prophet on python < 3.9
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.8'
        run: |
          pip install -e .[forecast]
       - name: Install vw on python < 3.10
-        if: matrix.python-version != '3.10'
+        if: matrix.python-version == '3.8' || matrix.python-version == '3.9'
        run: |
          pip install -e .[vw]
-      - name: Uninstall pyspark on (python 3.9) or (python 3.8 + windows)
-        if: matrix.python-version == '3.9' || (matrix.python-version == '3.8' && matrix.os == 'windows-2019')
+      - name: Uninstall pyspark on (python 3.9) or windows
+        if: matrix.python-version == '3.9' || matrix.os == 'windows-2019'
        run: |
          # Uninstall pyspark to test env without pyspark
          pip uninstall -y pyspark
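The matrix steps above install or remove optional dependencies (ray, prophet, vw, pyspark) per OS and Python version so that both the integrated and the fallback code paths get exercised; uninstalling pyspark, for instance, tests the environment without Spark. A minimal sketch of the import-guard pattern such a matrix exercises (the helper below is hypothetical, not FLAML's actual code):

```python
# Hypothetical sketch: one CI leg has pyspark installed, another has it
# uninstalled, so both branches below get covered. Not FLAML's actual code.
try:
    import pyspark  # noqa: F401

    _HAS_SPARK = True
except ImportError:
    _HAS_SPARK = False


def choose_tuning_backend(n_concurrent_trials: int) -> str:
    """Pick a backend, falling back to sequential search without Spark."""
    if _HAS_SPARK and n_concurrent_trials > 1:
        return "spark"
    return "sequential"
```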
.gitignore (1 change)

@@ -165,3 +165,4 @@ flaml/tune/spark/mylearner.py
 
 # local config files
 *.config.local
+patch.diff
.pre-commit-config.yaml

@@ -22,10 +22,21 @@ repos:
       - id: trailing-whitespace
       - id: end-of-file-fixer
+      - id: no-commit-to-branch
 
   - repo: https://github.com/psf/black
     rev: 23.3.0
     hooks:
       - id: black
 
+  - repo: https://github.com/executablebooks/mdformat
+    rev: 0.7.17
+    hooks:
+      - id: mdformat
+        additional_dependencies:
+          - mdformat-gfm
+          - mdformat-black
+          - mdformat_frontmatter
+
   - repo: https://github.com/charliermarsh/ruff-pre-commit
     rev: v0.0.261
     hooks:
Dockerfile

@@ -1,5 +1,5 @@
 # basic setup
-FROM python:3.7
+FROM mcr.microsoft.com/devcontainers/python:3.8
 RUN apt-get update && apt-get -y update
 RUN apt-get install -y sudo git npm
NOTICE.md (375 changes)

@@ -1,221 +1,222 @@
-NOTICES
+# NOTICES
 
 This repository incorporates material as listed below or described in the code.
 
-#
 ## Component. Ray.
 
-Code in tune/[analysis.py, sample.py, trial.py, result.py],
-searcher/[suggestion.py, variant_generator.py], and scheduler/trial_scheduler.py is adapted from
+Code in tune/\[analysis.py, sample.py, trial.py, result.py\],
+searcher/\[suggestion.py, variant_generator.py\], and scheduler/trial_scheduler.py is adapted from
 https://github.com/ray-project/ray/blob/master/python/ray/tune/
 
 ## Open Source License/Copyright Notice.
 
 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/
 
 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
 1. Definitions.
 
 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.
 
 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.
 
 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.
 
 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.
 
 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.
 
 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.
 
 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).
 
 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.
 
 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."
 
 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.
 
-2. Grant of Copyright License. Subject to the terms and conditions of
+1. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.
 
-3. Grant of Patent License. Subject to the terms and conditions of
+1. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.
 
-4. Redistribution. You may reproduce and distribute copies of the
+1. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:
 
 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and
 
 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and
 
 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and
 
 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.
 
 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.
 
-5. Submission of Contributions. Unless You explicitly state otherwise,
+1. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.
 
-6. Trademarks. This License does not grant permission to use the trade
+1. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.
 
-7. Disclaimer of Warranty. Unless required by applicable law or
+1. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.
 
-8. Limitation of Liability. In no event and under no legal theory,
+1. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.
 
-9. Accepting Warranty or Additional Liability. While redistributing
+1. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.
 
 END OF TERMS AND CONDITIONS
 
 APPENDIX: How to apply the Apache License to your work.
 
 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "{}"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.
+```
 
 Copyright {yyyy} {name of copyright owner}
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
 http://www.apache.org/licenses/LICENSE-2.0
+```
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 
---------------------------------------------------------------------------------
+______________________________________________________________________
 
 Code in python/ray/rllib/{evolution_strategies, dqn} adapted from
 https://github.com/openai (MIT License)

@@ -240,7 +241,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 
---------------------------------------------------------------------------------
+______________________________________________________________________
 
 Code in python/ray/rllib/impala/vtrace.py from
 https://github.com/deepmind/scalable_agent

@@ -251,7 +252,9 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
 https://www.apache.org/licenses/LICENSE-2.0
+```
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,

@@ -259,7 +262,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 
---------------------------------------------------------------------------------
+______________________________________________________________________
 
 Code in python/ray/rllib/ars is adapted from https://github.com/modestyachts/ARS
 
 Copyright (c) 2018, ARS contributors (Horia Mania, Aurelia Guy, Benjamin Recht)

@@ -269,11 +273,11 @@ Redistribution and use of ARS in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
 
 1. Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.
 
-2. Redistributions in binary form must reproduce the above copyright notice,
+1. Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation and/or
 other materials provided with the distribution.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED

@@ -286,5 +290,6 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-------------------
-Code in python/ray/_private/prometheus_exporter.py is adapted from https://github.com/census-instrumentation/opencensus-python/blob/master/contrib/opencensus-ext-prometheus/opencensus/ext/prometheus/stats_exporter/__init__.py
+______________________________________________________________________
+
+Code in python/ray/\_private/prometheus_exporter.py is adapted from https://github.com/census-instrumentation/opencensus-python/blob/master/contrib/opencensus-ext-prometheus/opencensus/ext/prometheus/stats_exporter/__init__.py
README.md (51 changes)

@@ -4,8 +4,8 @@
 
 
 [](https://pepy.tech/project/flaml)
 [](https://discord.gg/Cppx2vSPVP)
 <!-- [](https://gitter.im/FLAMLer/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -->
 
 # A Fast Library for Automated Machine Learning & Tuning

@@ -14,25 +14,27 @@
 <br>
 </p>
 
-:fire: The automated multi-agent chat framework in [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) is in preview from v2.0.0.
+:fire: Heads-up: We have migrated [AutoGen](https://microsoft.github.io/autogen/) into a dedicated [github repository](https://github.com/microsoft/autogen). Alongside this move, we have also launched a dedicated [Discord](https://discord.gg/pAbnFJrkgZ) server and a [website](https://microsoft.github.io/autogen/) for comprehensive documentation.
+
+:fire: The automated multi-agent chat framework in [AutoGen](https://microsoft.github.io/autogen/) is in preview from v2.0.0.
 
 :fire: FLAML is highlighted in OpenAI's [cookbook](https://github.com/openai/openai-cookbook#related-resources-from-around-the-web).
 
-:fire: [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) is released with support for ChatGPT and GPT-4, based on [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673).
-
-:fire: FLAML supports AutoML and Hyperparameter Tuning features in [Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/get-started/microsoft-fabric-overview) private preview. Sign up for these features at: https://aka.ms/fabric/data-science/sign-up.
+:fire: [autogen](https://microsoft.github.io/autogen/) is released with support for ChatGPT and GPT-4, based on [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673).
+
+:fire: FLAML supports Code-First AutoML & Tuning – Private Preview in [Microsoft Fabric Data Science](https://learn.microsoft.com/en-us/fabric/data-science/).
 
 ## What is FLAML
 
 FLAML is a lightweight Python library for efficient automation of machine
 learning and AI operations. It automates workflow based on large language models, machine learning models, etc.
 and optimizes their performance.
 
-* FLAML enables building next-gen GPT-X applications based on multi-agent conversations with minimal effort. It simplifies the orchestration, automation and optimization of a complex GPT-X workflow. It maximizes the performance of GPT-X models and augments their weakness.
-* For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend. Users can find their desired customizability from a smooth range.
-* It supports fast and economical automatic tuning (e.g., inference hyperparameters for foundation models, configurations in MLOps/LMOps workflows, pipelines, mathematical/statistical models, algorithms, computing experiments, software configurations), capable of handling large search space with heterogeneous evaluation cost and complex constraints/guidance/early stopping.
+- FLAML enables building next-gen GPT-X applications based on multi-agent conversations with minimal effort. It simplifies the orchestration, automation and optimization of a complex GPT-X workflow. It maximizes the performance of GPT-X models and augments their weakness.
+- For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend. Users can find their desired customizability from a smooth range.
+- It supports fast and economical automatic tuning (e.g., inference hyperparameters for foundation models, configurations in MLOps/LMOps workflows, pipelines, mathematical/statistical models, algorithms, computing experiments, software configurations), capable of handling large search space with heterogeneous evaluation cost and complex constraints/guidance/early stopping.
 
-FLAML is powered by a series of [research studies](/docs/Research) from Microsoft Research and collaborators such as Penn State University, Stevens Institute of Technology, University of Washington, and University of Waterloo.
+FLAML is powered by a series of [research studies](https://microsoft.github.io/FLAML/docs/Research/) from Microsoft Research and collaborators such as Penn State University, Stevens Institute of Technology, University of Washington, and University of Waterloo.
 
 FLAML has a .NET implementation in [ML.NET](http://dot.net/ml), an open-source, cross-platform machine learning framework for .NET.

@@ -44,7 +46,8 @@ FLAML requires **Python version >= 3.8**. It can be installed from pip:
 pip install flaml
 ```
 
-Minimal dependencies are installed without extra options. You can install extra options based on the feature you need. For example, use the following to install the dependencies needed by the [`autogen`](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package.
+Minimal dependencies are installed without extra options. You can install extra options based on the feature you need. For example, use the following to install the dependencies needed by the [`autogen`](https://microsoft.github.io/autogen/) package.
 
 ```bash
 pip install "flaml[autogen]"
 ```

@@ -54,18 +57,24 @@ Each of the [`notebook examples`](https://github.com/microsoft/FLAML/tree/main/n
 
 ## Quickstart
 
-* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package enables the next-gen GPT-X applications with a generic multi-agent conversation framework.
-It offers customizable and conversable agents which integrate LLMs, tools and human.
-By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
+- (New) The [autogen](https://microsoft.github.io/autogen/) package enables the next-gen GPT-X applications with a generic multi-agent conversation framework.
+  It offers customizable and conversable agents which integrate LLMs, tools and human.
+  By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
 
 ```python
 from flaml import autogen
 
 assistant = autogen.AssistantAgent("assistant")
 user_proxy = autogen.UserProxyAgent("user_proxy")
-user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest technology companies as of today.")
+user_proxy.initiate_chat(
+    assistant,
+    message="Show me the YTD gain of 10 largest technology companies as of today.",
+)
 # This initiates an automated chat between the two agents to solve the task
 ```
 
 Autogen also helps maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4. It offers a drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
 
 ```python
 # perform tuning
 config, analysis = autogen.Completion.tune(

@@ -80,30 +89,32 @@ config, analysis = autogen.Completion.tune(
 # perform inference for a test instance
 response = autogen.Completion.create(context=test_instance, **config)
 ```
 
-* With three lines of code, you can start using this economical and fast
-AutoML engine as a [scikit-learn style estimator](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML).
+- With three lines of code, you can start using this economical and fast
+  AutoML engine as a [scikit-learn style estimator](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML).
 
 ```python
 from flaml import AutoML
 
 automl = AutoML()
 automl.fit(X_train, y_train, task="classification")
 ```
 
-* You can restrict the learners and use FLAML as a fast hyperparameter tuning
-tool for XGBoost, LightGBM, Random Forest etc. or a [customized learner](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML#estimator-and-search-space).
+- You can restrict the learners and use FLAML as a fast hyperparameter tuning
+  tool for XGBoost, LightGBM, Random Forest etc. or a [customized learner](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML#estimator-and-search-space).
 
 ```python
 automl.fit(X_train, y_train, task="classification", estimator_list=["lgbm"])
 ```
 
-* You can also run generic hyperparameter tuning for a [custom function](https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function).
+- You can also run generic hyperparameter tuning for a [custom function](https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function).
 
 ```python
 from flaml import tune
 tune.run(evaluation_function, config={…}, low_cost_partial_config={…}, time_budget_s=3600)
 ```
 
-* [Zero-shot AutoML](https://microsoft.github.io/FLAML/docs/Use-Cases/Zero-Shot-AutoML) allows using the existing training API from lightgbm, xgboost etc. while getting the benefit of AutoML in choosing high-performance hyperparameter configurations per task.
+- [Zero-shot AutoML](https://microsoft.github.io/FLAML/docs/Use-Cases/Zero-Shot-AutoML) allows using the existing training API from lightgbm, xgboost etc. while getting the benefit of AutoML in choosing high-performance hyperparameter configurations per task.
 
 ```python
 from flaml.default import LGBMRegressor
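The quickstart snippets in the README diff above assume `X_train`/`y_train` already exist. A self-contained sketch, assuming scikit-learn is available for sample data; the FLAML calls mirror the README, and `time_budget` is an illustrative choice:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

from flaml import AutoML

# Sample data so the README's fit() call runs end to end.
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

automl = AutoML()
automl.fit(
    X_train,
    y_train,
    task="classification",
    estimator_list=["lgbm"],  # restrict learners, as in the README
    time_budget=10,  # seconds; small value just for a quick demo
)
print(automl.predict(X_test)[:5])
```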
SECURITY.md (16 changes)

@@ -4,7 +4,7 @@
 
 Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
 
-If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](<https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)>), please report it to us as described below.
 
 ## Reporting Security Issues

@@ -18,13 +18,13 @@ You should receive a response within 24 hours. If for some reason you do not, pl
 
 Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
 
-* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
-* Full paths of source file(s) related to the manifestation of the issue
-* The location of the affected source code (tag/branch/commit or direct URL)
-* Any special configuration required to reproduce the issue
-* Step-by-step instructions to reproduce the issue
-* Proof-of-concept or exploit code (if possible)
-* Impact of the issue, including how an attacker might exploit the issue
+- Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+- Full paths of source file(s) related to the manifestation of the issue
+- The location of the affected source code (tag/branch/commit or direct URL)
+- Any special configuration required to reproduce the issue
+- Step-by-step instructions to reproduce the issue
+- Proof-of-concept or exploit code (if possible)
+- Impact of the issue, including how an attacker might exploit the issue
 
 This information will help us triage your report more quickly.
flaml/__init__.py

@@ -1,9 +1,9 @@
 import logging
-from flaml.automl import AutoML, logger_formatter
-from flaml.tune.searcher import CFO, BlendSearch, FLOW2, BlendSearchTuner, RandomSearch
-from flaml.onlineml.autovw import AutoVW
-from flaml.version import __version__
+
+from flaml.automl import AutoML, logger_formatter
+from flaml.onlineml.autovw import AutoVW
+from flaml.tune.searcher import CFO, FLOW2, BlendSearch, BlendSearchTuner, RandomSearch
+from flaml.version import __version__
 
 # Set the root logger.
 logger = logging.getLogger(__name__)
flaml/autogen/__init__.py

@@ -1,3 +1,3 @@
-from .oai import *
 from .agentchat import *
 from .code_utils import DEFAULT_MODEL, FAST_MODEL
+from .oai import *
flaml/autogen/agentchat/__init__.py

@@ -1,12 +1,12 @@
 from .agent import Agent
-from .responsive_agent import ResponsiveAgent
 from .assistant_agent import AssistantAgent
-from .user_proxy_agent import UserProxyAgent
+from .conversable_agent import ConversableAgent
+from .groupchat import GroupChat, GroupChatManager
+from .user_proxy_agent import UserProxyAgent
 
 __all__ = [
     "Agent",
-    "ResponsiveAgent",
+    "ConversableAgent",
     "AssistantAgent",
     "UserProxyAgent",
     "GroupChat",
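The reworked exports above surface `GroupChat` and `GroupChatManager` alongside the renamed `ConversableAgent`. A rough usage sketch; the constructor shapes (`agents`, `messages`, `max_round`, `groupchat`) and the package-root re-export are assumptions about the API at this commit, and the LLM config is a placeholder:

```python
from flaml import autogen

# Placeholder credentials; substitute a real model/key to run this.
llm_config = {"config_list": [{"model": "gpt-4", "api_key": "<your key>"}]}

coder = autogen.AssistantAgent("coder", llm_config=llm_config)
critic = autogen.AssistantAgent("critic", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent("user_proxy", human_input_mode="NEVER")

# Assumed constructor shape: the manager coordinates speaker selection
# among the listed agents for up to max_round turns.
groupchat = autogen.GroupChat(agents=[user_proxy, coder, critic], messages=[], max_round=6)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

user_proxy.initiate_chat(manager, message="Draft and critique a haiku about AutoML.")
```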
flaml/autogen/agentchat/agent.py

@@ -25,10 +25,10 @@ class Agent:
         return self._name
 
     def send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
-        """(Aabstract method) Send a message to another agent."""
+        """(Abstract method) Send a message to another agent."""
 
     async def a_send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
-        """(Aabstract async method) Send a message to another agent."""
+        """(Abstract async method) Send a message to another agent."""
 
     def receive(self, message: Union[Dict, str], sender: "Agent", request_reply: Optional[bool] = None):
         """(Abstract method) Receive a message from another agent."""
@@ -1,29 +1,30 @@
|
||||
from .responsive_agent import ResponsiveAgent
|
||||
from typing import Callable, Dict, Optional, Union
|
||||
|
||||
from .conversable_agent import ConversableAgent
|
||||
|
||||
class AssistantAgent(ResponsiveAgent):
|
||||
"""(In preview) Assistant agent, designed to solve a task with LLM.
|
||||
|
||||
AssistantAgent is a subclass of ResponsiveAgent configured with a default system message.
|
||||
The default system message is designed to solve a task with LLM,
|
||||
including suggesting python code blocks and debugging.
|
||||
`human_input_mode` is default to "NEVER"
|
||||
and `code_execution_config` is default to False.
|
||||
This agent doesn't execute code by default, and expects the user to execute the code.
|
||||
class AssistantAgent(ConversableAgent):
|
||||
"""(In preview) Assistant agent, designed to solve tasks with LLM.
|
||||
|
||||
AssistantAgent is a subclass of ConversableAgent configured with a default system message.
|
||||
The default system message is designed to solve tasks with LLM,
|
||||
including suggesting Python code blocks and debugging.
|
||||
`human_input_mode` defaults to "NEVER"
|
||||
and `code_execution_config` defaults to False.
|
||||
This agent doesn't execute code by default and expects the user to execute the code.
|
||||
"""
|
||||
|
||||
DEFAULT_SYSTEM_MESSAGE = """You are a helpful AI assistant.
|
||||
Solve tasks using your coding and language skills.
|
||||
If a plan is not provided, explain the plan first. Be clear which step uses code, and which step uses your language skill.
|
||||
In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
|
||||
Solve tasks using your coding and language skills.
|
||||
In the following cases, suggest Python code (in a Python coding block) or shell script (in an sh coding block) for the user to execute.
|
||||
1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
|
||||
2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. Solve the task step by step if you need to.
|
||||
You must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
|
||||
If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
|
||||
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
|
||||
When you find an answer, verify the answer carefully. If a function for planning is provided, call the function to make plans and verify the execution.
|
||||
Reply "TERMINATE" in the end when everything is done.
|
||||
2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
|
Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use the 'print' function for the output when relevant. Check the execution result returned by the user.
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
Reply "TERMINATE" in the end when everything is done.
"""

def __init__(
@@ -35,24 +36,24 @@ class AssistantAgent(ResponsiveAgent):
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, bool]] = False,
**kwargs,
**kwargs: Dict,
):
"""
Args:
name (str): agent name.
system_message (str): system message for the ChatCompletion inference.
Please override this attribute if you want to reprogram the agent.
llm_config (dict): llm inference configuration.
Please refer to [autogen.Completion.create](/docs/reference/autogen/oai/completion#create)
name (str): Agent name.
system_message (Optional[str]): System message for the ChatCompletion inference.
Override this attribute if you want to reprogram the agent.
llm_config (Optional[Union[Dict, bool]]): LLM inference configuration.
Refer to [autogen.Completion.create](/docs/reference/autogen/oai/completion#create)
for available options.
is_termination_msg (function): a function that takes a message in the form of a dictionary
is_termination_msg (Optional[Callable[[Dict], bool]]): A function that takes a message in the form of a dictionary
and returns a boolean value indicating if this received message is a termination message.
The dict can contain the following keys: "content", "role", "name", "function_call".
max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
max_consecutive_auto_reply (Optional[int]): The maximum number of consecutive auto replies.
Defaults to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
The limit only plays a role when human_input_mode is not "ALWAYS".
**kwargs (dict): Please refer to other kwargs in
[ResponsiveAgent](responsive_agent#__init__).
**kwargs (Dict): Additional keyword arguments. Refer to other kwargs in
[ConversableAgent](conversable_agent#__init__).
"""
super().__init__(
name,
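For reference, a minimal usage sketch of the constructor documented above (not part of the diff; the llm_config values are placeholders):

```python
# Hedged sketch: constructing an AssistantAgent with the arguments
# documented in the hunk above. Model name and temperature are placeholders.
from flaml.autogen.agentchat import AssistantAgent

assistant = AssistantAgent(
    name="assistant",
    llm_config={"model": "gpt-4", "temperature": 0},
    max_consecutive_auto_reply=10,  # None falls back to MAX_CONSECUTIVE_AUTO_REPLY
)
```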
@@ -1,14 +1,14 @@
import re
import os
from pydantic import BaseModel, Extra, root_validator
from typing import Any, Callable, Dict, List, Optional, Union
import re
from time import sleep
from typing import Any, Callable, Dict, List, Optional, Union

from pydantic import BaseModel, Extra, root_validator

from flaml.autogen.agentchat import Agent, UserProxyAgent
from flaml.autogen.code_utils import UNKNOWN, extract_code, execute_code, infer_lang
from flaml.autogen.code_utils import UNKNOWN, execute_code, extract_code, infer_lang
from flaml.autogen.math_utils import get_answer


PROMPTS = {
# default
"default": """Let's use Python to solve a math problem.
@@ -165,7 +165,7 @@ class MathUserProxyAgent(UserProxyAgent):
default_auto_reply=default_auto_reply,
**kwargs,
)
self.register_auto_reply([Agent, None], MathUserProxyAgent._generate_math_reply, 1)
self.register_reply([Agent, None], MathUserProxyAgent._generate_math_reply, 1)
# fixed var
self._max_invalid_q_per_step = max_invalid_q_per_step

@@ -1,6 +1,7 @@
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from flaml.autogen.agentchat.agent import Agent
from flaml.autogen.agentchat.assistant_agent import AssistantAgent
from typing import Callable, Dict, Optional, Union, List, Tuple, Any


class RetrieveAssistantAgent(AssistantAgent):
@@ -16,7 +17,7 @@ class RetrieveAssistantAgent(AssistantAgent):

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_auto_reply(Agent, RetrieveAssistantAgent._generate_retrieve_assistant_reply)
self.register_reply(Agent, RetrieveAssistantAgent._generate_retrieve_assistant_reply)

def _generate_retrieve_assistant_reply(
self,

@@ -1,12 +1,13 @@
import chromadb
from flaml.autogen.agentchat.agent import Agent
from flaml.autogen.agentchat import UserProxyAgent
from flaml.autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db, num_tokens_from_text
from flaml.autogen.code_utils import extract_code
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from typing import Callable, Dict, Optional, Union, List, Tuple, Any
import chromadb
from IPython import get_ipython

from flaml.autogen.agentchat import UserProxyAgent
from flaml.autogen.agentchat.agent import Agent
from flaml.autogen.code_utils import extract_code
from flaml.autogen.retrieve_utils import create_vector_db_from_dir, num_tokens_from_text, query_vector_db

try:
from termcolor import colored
except ImportError:
@@ -148,7 +149,7 @@ class RetrieveUserProxyAgent(UserProxyAgent):
self._ipython = get_ipython()
self._doc_idx = -1 # the index of the current used doc
self._results = {} # the results of the current query
self.register_auto_reply(Agent, RetrieveUserProxyAgent._generate_retrieve_user_reply)
self.register_reply(Agent, RetrieveUserProxyAgent._generate_retrieve_user_reply)

@staticmethod
def get_max_tokens(model="gpt-3.5-turbo"):

@@ -1,10 +1,10 @@
import asyncio
from collections import defaultdict
import copy
import json
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

from flaml.autogen import oai
from .agent import Agent
from flaml.autogen.code_utils import (
DEFAULT_MODEL,
UNKNOWN,
@@ -13,6 +13,8 @@ from flaml.autogen.code_utils import (
infer_lang,
)

from .agent import Agent

try:
from termcolor import colored
except ImportError:
@@ -21,11 +23,11 @@ except ImportError:
return x


class ResponsiveAgent(Agent):
"""(Experimental) A class for generic responsive agents which can be configured as assistant or user proxy.
class ConversableAgent(Agent):
"""(In preview) A class for generic conversable agents which can be configured as assistant or user proxy.

After receiving each message, the agent will send a reply to the sender unless the msg is a termination msg.
For example, AssistantAgent and UserProxyAgent are subclasses of ResponsiveAgent,
For example, AssistantAgent and UserProxyAgent are subclasses of this class,
configured with different default settings.

To modify auto reply, override `generate_reply` method.
@@ -119,12 +121,12 @@ class ResponsiveAgent(Agent):
self._default_auto_reply = default_auto_reply
self._reply_func_list = []
self.reply_at_receive = defaultdict(bool)
self.register_auto_reply([Agent, None], ResponsiveAgent.generate_oai_reply)
self.register_auto_reply([Agent, None], ResponsiveAgent.generate_code_execution_reply)
self.register_auto_reply([Agent, None], ResponsiveAgent.generate_function_call_reply)
self.register_auto_reply([Agent, None], ResponsiveAgent.check_termination_and_human_reply)
self.register_reply([Agent, None], ConversableAgent.generate_oai_reply)
self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)

def register_auto_reply(
def register_reply(
self,
trigger: Union[Type[Agent], str, Agent, Callable[[Agent], bool], List],
reply_func: Callable,
@@ -151,7 +153,7 @@ class ResponsiveAgent(Agent):
The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.
```python
def reply_func(
recipient: ResponsiveAgent,
recipient: ConversableAgent,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
config: Optional[Any] = None,
@@ -499,7 +501,7 @@ class ResponsiveAgent(Agent):

def initiate_chat(
self,
recipient: "ResponsiveAgent",
recipient: "ConversableAgent",
clear_history: Optional[bool] = True,
silent: Optional[bool] = False,
**context,
@@ -522,7 +524,7 @@ class ResponsiveAgent(Agent):

async def a_initiate_chat(
self,
recipient: "ResponsiveAgent",
recipient: "ConversableAgent",
clear_history: Optional[bool] = True,
silent: Optional[bool] = False,
**context,
@@ -611,7 +613,7 @@ class ResponsiveAgent(Agent):
if messages is None:
messages = self._oai_messages[sender]
last_n_messages = code_execution_config.pop("last_n_messages", 1)
for i in range(last_n_messages):
for i in range(min(len(messages), last_n_messages)):
message = messages[-(i + 1)]
code_blocks = extract_code(message["content"])
if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
@@ -895,10 +897,11 @@ class ResponsiveAgent(Agent):
exitcode, logs, image = (
1,
f"unknown language {lang}",
self._code_execution_config["use_docker"],
None,
)
# raise NotImplementedError
self._code_execution_config["use_docker"] = image
if image is not None:
self._code_execution_config["use_docker"] = image
logs_all += "\n" + logs
if exitcode != 0:
return exitcode, logs_all
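The hunks above rename `register_auto_reply` to `register_reply`. For reference, a hedged sketch of registering a custom reply function with the signature shown in the docstring (not part of the diff; the echo behavior is illustrative only):

```python
# Hedged sketch of the renamed API: a custom reply function registered the
# same way the diff registers the built-in replies.
from typing import Any, Dict, List, Optional

def echo_reply(recipient, messages: Optional[List[Dict]] = None, sender=None, config: Optional[Any] = None):
    last = messages[-1]["content"] if messages else ""
    return True, f"echo: {last}"  # (final, reply) pair, as in the built-in reply functions

# agent.register_reply([Agent, None], echo_reply, 1)  # position 1, as in MathUserProxyAgent
```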
@@ -1,8 +1,9 @@
from dataclasses import dataclass
import sys
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

from .agent import Agent
from .responsive_agent import ResponsiveAgent
from .conversable_agent import ConversableAgent


@dataclass
@@ -12,6 +13,7 @@ class GroupChat:
agents: List[Agent]
messages: List[Dict]
max_round: int = 10
admin_name: str = "Admin" # the name of the admin agent

@property
def agent_names(self) -> List[str]:
@@ -38,7 +40,7 @@ class GroupChat:
Read the following conversation.
Then select the next role from {self.agent_names} to play. Only return the role."""

def select_speaker(self, last_speaker: Agent, selector: ResponsiveAgent):
def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):
"""Select the next speaker."""
selector.update_system_message(self.select_speaker_msg())
final, name = selector.generate_oai_reply(
@@ -62,7 +64,7 @@ Then select the next role from {self.agent_names} to play. Only return the role.
return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])


class GroupChatManager(ResponsiveAgent):
class GroupChatManager(ConversableAgent):
"""(In preview) A chat manager agent that can manage a group chat of multiple agents."""

def __init__(
@@ -83,7 +85,7 @@ class GroupChatManager(ResponsiveAgent):
system_message=system_message,
**kwargs,
)
self.register_auto_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
self.register_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
# self._random = random.Random(seed)

def run_chat(
@@ -97,21 +99,36 @@ class GroupChatManager(ResponsiveAgent):
messages = self._oai_messages[sender]
message = messages[-1]
speaker = sender
for i in range(config.max_round):
groupchat = config
for i in range(groupchat.max_round):
# set the name to speaker's name if the role is not function
if message["role"] != "function":
message["name"] = speaker.name
config.messages.append(message)
groupchat.messages.append(message)
# broadcast the message to all agents except the speaker
for agent in config.agents:
for agent in groupchat.agents:
if agent != speaker:
self.send(message, agent, request_reply=False, silent=True)
if i != config.max_round - 1:
# speaker selection msg from an agent
speaker = config.select_speaker(speaker, self)
if i == groupchat.max_round - 1:
# the last round
break
try:
# select the next speaker
speaker = groupchat.select_speaker(speaker, self)
# let the speaker speak
reply = speaker.generate_reply(sender=self)
if reply is None:
break
speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
except KeyboardInterrupt:
# let the admin agent speak if interrupted
if groupchat.admin_name in groupchat.agent_names:
# admin agent is one of the participants
speaker = groupchat.agent_by_name(groupchat.admin_name)
reply = speaker.generate_reply(sender=self)
else:
# admin agent is not found in the participants
raise
if reply is None:
break
# The speaker sends the message without requesting a reply
speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
return True, None
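For reference, a hedged sketch of the group-chat flow that `run_chat` above implements (not part of the diff; `user_proxy` and `assistant` are assumed to be agents created elsewhere, and the llm_config is a placeholder):

```python
# Hedged sketch: assembling a GroupChat from the dataclass fields shown above
# and driving it through a GroupChatManager.
from flaml.autogen.agentchat.groupchat import GroupChat, GroupChatManager

groupchat = GroupChat(agents=[user_proxy, assistant], messages=[], max_round=10, admin_name="Admin")
manager = GroupChatManager(groupchat=groupchat, llm_config={"model": "gpt-4"})
user_proxy.initiate_chat(manager, message="Summarize the latest results.")
```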
@@ -1,14 +1,15 @@
from .responsive_agent import ResponsiveAgent
from typing import Callable, Dict, Optional, Union

from .conversable_agent import ConversableAgent

class UserProxyAgent(ResponsiveAgent):

class UserProxyAgent(ConversableAgent):
"""(In preview) A proxy agent for the user, that can execute code and provide feedback to the other agents.

UserProxyAgent is a subclass of ResponsiveAgent configured with `human_input_mode` to ALWAYS
UserProxyAgent is a subclass of ConversableAgent configured with `human_input_mode` to ALWAYS
and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
Code execution is enabled by default. LLM-based auto reply is disabled by default.
To modify auto reply, register a method with (`register_auto_reply`)[responsive_agent#register_auto_reply].
To modify auto reply, register a method with (`register_reply`)[conversable_agent#register_reply].
To modify the way to get human input, override `get_human_input` method.
To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
`run_code`, and `execute_function` methods respectively.
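For reference, a hedged construction sketch matching the docstring above (not part of the diff; the `work_dir` value is a placeholder):

```python
# Hedged sketch: a user proxy with automatic code execution and no
# human input, per the defaults documented in the hunk above.
from flaml.autogen.agentchat import UserProxyAgent

user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",  # default is "ALWAYS"
    code_execution_config={"work_dir": "coding", "use_docker": False},
)
```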
@@ -1,13 +1,14 @@
import logging
import os
import pathlib
import re
import signal
import subprocess
import sys
import os
import pathlib
from typing import List, Dict, Tuple, Optional, Union, Callable
import re
import time
from hashlib import md5
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union

from flaml.autogen import oai

try:

@@ -1,5 +1,6 @@
from typing import Optional
from flaml.autogen import oai, DEFAULT_MODEL

from flaml.autogen import DEFAULT_MODEL, oai

_MATH_PROMPT = "{problem} Solve the problem carefully. Simplify your answer as much as possible. Put the final answer in \\boxed{{}}."
_MATH_CONFIG = {

@@ -1,10 +1,10 @@
from flaml.autogen.oai.completion import Completion, ChatCompletion
from flaml.autogen.oai.completion import ChatCompletion, Completion
from flaml.autogen.oai.openai_utils import (
get_config_list,
config_list_from_json,
config_list_from_models,
config_list_gpt4_gpt35,
config_list_openai_aoai,
config_list_from_models,
config_list_from_json,
get_config_list,
)

__all__ = [

@@ -1,28 +1,31 @@
from time import sleep
import logging
import time
from typing import List, Optional, Dict, Callable, Union
import sys
import shutil
import sys
import time
from time import sleep
from typing import Callable, Dict, List, Optional, Union

import numpy as np
from flaml import tune, BlendSearch
from flaml.tune.space import is_constant

from flaml import BlendSearch, tune
from flaml.automl.logger import logger_formatter
from flaml.tune.space import is_constant

from .openai_utils import get_key

try:
import openai
from openai.error import (
ServiceUnavailableError,
RateLimitError,
APIError,
InvalidRequestError,
APIConnectionError,
Timeout,
AuthenticationError,
)
from openai import Completion as openai_Completion
import diskcache
import openai
from openai import Completion as openai_Completion
from openai.error import (
APIConnectionError,
APIError,
AuthenticationError,
InvalidRequestError,
RateLimitError,
ServiceUnavailableError,
Timeout,
)

ERROR = None
except ImportError:
@@ -48,6 +51,7 @@ class Completion(openai_Completion):
"gpt-3.5-turbo-0301", # deprecate in Sep
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-4",
"gpt-4-32k",
@@ -70,6 +74,7 @@ class Completion(openai_Completion):
"gpt-3.5-turbo-0301": (0.0015, 0.002), # deprecate in Sep
"gpt-3.5-turbo-0613": (0.0015, 0.002),
"gpt-3.5-turbo-16k": (0.003, 0.004),
"gpt-3.5-turbo-16k-0613": (0.003, 0.004),
"gpt-35-turbo": 0.002,
"gpt-4": (0.03, 0.06),
"gpt-4-32k": (0.06, 0.12),
@@ -695,7 +700,7 @@ class Completion(openai_Completion):
E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`.
The actual prompt will be:
"Complete the following sentence: Today I feel".
More examples can be found at [templating](/docs/Use-Cases/Autogen#templating).
More examples can be found at [templating](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#templating).
use_cache (bool, Optional): Whether to use cached responses.
config_list (List, Optional): List of configurations for the completion to try.
The first one that does not raise an error will be used.
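For reference, a hedged sketch of the templating behavior the docstring above describes (not part of the diff; the model value is a placeholder):

```python
# Hedged sketch: the {prefix} field in the prompt template is filled from
# context, yielding "Complete the following sentence: Today I feel".
from flaml.autogen import oai

response = oai.Completion.create(
    context={"prefix": "Today I feel"},
    prompt="Complete the following sentence: {prefix}",
    model="gpt-3.5-turbo",
)
```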
@@ -1,7 +1,7 @@
import os
import json
from typing import List, Optional, Dict, Set, Union
import logging
import os
from typing import Dict, List, Optional, Set, Union

NON_CACHE_KEY = ["api_key", "api_base", "api_type", "api_version"]


@@ -1,13 +1,14 @@
from typing import List, Union, Dict, Tuple
import os
import requests
from urllib.parse import urlparse
import glob
import tiktoken
import chromadb
from chromadb.api import API
import chromadb.utils.embedding_functions as ef
import logging
import os
from typing import Dict, List, Tuple, Union
from urllib.parse import urlparse

import chromadb
import chromadb.utils.embedding_functions as ef
import requests
import tiktoken
from chromadb.api import API

logger = logging.getLogger(__name__)
TEXT_FORMATS = ["txt", "json", "csv", "tsv", "md", "html", "htm", "rtf", "rst", "jsonl", "log", "xml", "yaml", "yml"]

@@ -1,5 +1,5 @@
from flaml.automl.automl import AutoML, size
from flaml.automl.logger import logger_formatter
from flaml.automl.state import SearchState, AutoMLState
from flaml.automl.state import AutoMLState, SearchState

__all__ = ["AutoML", "AutoMLState", "SearchState", "logger_formatter", "size"]

@@ -3,40 +3,41 @@
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from __future__ import annotations
import time

import json
import logging
import os
import sys
from typing import Callable, List, Union, Optional
import time
from functools import partial
from typing import Callable, List, Optional, Union

import numpy as np
import logging
import json

from flaml.automl.state import SearchState, AutoMLState
from flaml import tune
from flaml.automl.logger import logger, logger_formatter
from flaml.automl.ml import train_estimator

from flaml.automl.time_series import TimeSeriesDataset
from flaml.config import (
MIN_SAMPLE_TRAIN,
MEM_THRES,
RANDOM_SEED,
SMALL_LARGE_THRES,
CV_HOLDOUT_THRESHOLD,
SPLIT_RATIO,
N_SPLITS,
SAMPLE_MULTIPLY_FACTOR,
)
from flaml.automl.spark import DataFrame, Series, psDataFrame, psSeries
from flaml.automl.state import AutoMLState, SearchState
from flaml.automl.task.factory import task_factory

# TODO check to see when we can remove these
from flaml.automl.task.task import CLASSIFICATION, Task
from flaml.automl.task.factory import task_factory
from flaml import tune
from flaml.automl.logger import logger, logger_formatter
from flaml.automl.time_series import TimeSeriesDataset
from flaml.automl.training_log import training_log_reader, training_log_writer
from flaml.config import (
CV_HOLDOUT_THRESHOLD,
MEM_THRES,
MIN_SAMPLE_TRAIN,
N_SPLITS,
RANDOM_SEED,
SAMPLE_MULTIPLY_FACTOR,
SMALL_LARGE_THRES,
SPLIT_RATIO,
)
from flaml.default import suggest_learner
from flaml.version import __version__ as flaml_version
from flaml.automl.spark import psDataFrame, psSeries, DataFrame, Series
from flaml.tune.spark.utils import check_spark, get_broadcast_data
from flaml.version import __version__ as flaml_version

ERROR = (
DataFrame is None and ImportError("please install flaml[automl] option to use the flaml.automl package.") or None
@@ -211,9 +212,9 @@ class AutoML(BaseEstimator):
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the name of the estimators, and values are the starting
hyperparamter configurations for the corresponding estimators.
The value can be a single hyperparamter configuration dict or a list
of hyperparamter configuration dicts.
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
@@ -246,7 +247,7 @@ class AutoML(BaseEstimator):
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcely cancel Spark jobs if the
search time exceeded the time budget.
append_log: boolean, default=False | Whetehr to directly append the log
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
@@ -476,12 +477,12 @@ class AutoML(BaseEstimator):

@property
def feature_transformer(self):
"""Returns AutoML Transformer"""
"""Returns feature transformer which is used to preprocess data before applying training or inference."""
return getattr(self, "_transformer", None)

@property
def label_transformer(self):
"""Returns AutoML label transformer"""
"""Returns label transformer which is used to preprocess labels before scoring, and inverse transform labels after inference."""
return getattr(self, "_label_transformer", None)

@property
@@ -606,7 +607,7 @@ class AutoML(BaseEstimator):

Args:
learner_name: A string of the learner's name.
learner_class: A subclass of flaml.model.BaseEstimator.
learner_class: A subclass of flaml.automl.model.BaseEstimator.
"""
self._state.learner_classes[learner_name] = learner_class

@@ -1347,9 +1348,9 @@ class AutoML(BaseEstimator):
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the name of the estimators, and values are the starting
hyperparamter configurations for the corresponding estimators.
The value can be a single hyperparamter configuration dict or a list
of hyperparamter configuration dicts.
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
@@ -1381,7 +1382,7 @@ class AutoML(BaseEstimator):
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcely cancel the PySpark job if overtime.
append_log: boolean, default=False | Whetehr to directly append the log
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
@@ -2325,7 +2326,7 @@ class AutoML(BaseEstimator):
)
time_used = time.time() - start_run_time
better = False
if analysis.trials:
if analysis.trials and analysis.trials[-1].last_result:
result = analysis.trials[-1].last_result
search_state.update(result, time_used=time_used)
if self._estimator_index is None:
@@ -2647,7 +2648,7 @@ class AutoML(BaseEstimator):
if self._estimator_index == len(estimator_list):
self._estimator_index = 0
return estimator_list[self._estimator_index]
min_estimated_cost, selected = np.Inf, None
min_estimated_cost, selected = np.inf, None
inv = []
untried_exists = False
for i, estimator in enumerate(estimator_list):
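For reference, a hedged sketch of the starting_points pattern the docstring above refers to (not part of the diff; X_train and y_train are assumed to exist):

```python
# Hedged sketch: warm-start a second AutoML run from the best configs
# found by a first run, as described in the starting_points docstring.
from flaml import AutoML

automl = AutoML()
automl.fit(X_train, y_train, task="classification", time_budget=60)

new_automl = AutoML()
new_automl.fit(
    X_train,
    y_train,
    task="classification",
    time_budget=60,
    starting_points=automl.best_config_per_estimator,  # dict: estimator name -> config(s)
)
```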
flaml/automl/contrib/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .histgb import HistGradientBoostingEstimator

flaml/automl/contrib/histgb.py (new file, 75 lines)
@@ -0,0 +1,75 @@
try:
from sklearn.ensemble import HistGradientBoostingClassifier, HistGradientBoostingRegressor
except ImportError:
pass

from flaml import tune
from flaml.automl.model import SKLearnEstimator
from flaml.automl.task import Task


class HistGradientBoostingEstimator(SKLearnEstimator):
"""The class for tuning Histogram Gradient Boosting."""

ITER_HP = "max_iter"
HAS_CALLBACK = False
DEFAULT_ITER = 100

@classmethod
def search_space(cls, data_size: int, task, **params) -> dict:
upper = max(5, min(32768, int(data_size[0]))) # upper must be larger than lower
return {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_leaves": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"min_samples_leaf": {
"domain": tune.lograndint(lower=2, upper=2**7 + 1),
"init_value": 20,
},
"learning_rate": {
"domain": tune.loguniform(lower=1 / 1024, upper=1.0),
"init_value": 0.1,
},
"log_max_bin": { # log transformed with base 2, <= 256
"domain": tune.lograndint(lower=3, upper=9),
"init_value": 8,
},
"l2_regularization": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1.0,
},
}

def config2params(self, config: dict) -> dict:
params = super().config2params(config)
if "log_max_bin" in params:
params["max_bins"] = (1 << params.pop("log_max_bin")) - 1
if "max_leaves" in params:
params["max_leaf_nodes"] = params.get("max_leaf_nodes", params.pop("max_leaves"))
if "n_estimators" in params:
params["max_iter"] = params.get("max_iter", params.pop("n_estimators"))
if "random_state" not in params:
params["random_state"] = 24092023
if "n_jobs" in params:
params.pop("n_jobs")
return params

def __init__(
self,
task: Task,
**config,
):
super().__init__(task, **config)
self.params["verbose"] = 0

if self._task.is_classification():
self.estimator_class = HistGradientBoostingClassifier
else:
self.estimator_class = HistGradientBoostingRegressor
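For reference, a hedged usage sketch for the new contrib estimator (not part of the diff; the dataset variables are placeholders). Note that config2params above maps log_max_bin=8 to max_bins = (1 << 8) - 1 = 255:

```python
# Hedged sketch: the "histgb" key is registered in GenericTask.estimators
# later in this changeset, so it can be selected via estimator_list.
from flaml import AutoML

automl = AutoML()
automl.fit(X_train, y_train, task="classification", estimator_list=["histgb"], time_budget=60)
```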
@@ -2,15 +2,17 @@
# * Copyright (c) Microsoft Corporation. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
import numpy as np
import os
from datetime import datetime
from typing import TYPE_CHECKING, Union
import os

import numpy as np

from flaml.automl.spark import DataFrame, Series, pd, ps, psDataFrame, psSeries
from flaml.automl.training_log import training_log_reader
from flaml.automl.spark import ps, psDataFrame, psSeries, DataFrame, Series, pd

try:
from scipy.sparse import vstack, issparse
from scipy.sparse import issparse, vstack
except ImportError:
pass

@@ -41,8 +43,9 @@ def load_openml_dataset(dataset_id, data_dir=None, random_state=0, dataset_forma
y_train: A series or array of labels for training data.
y_test: A series or array of labels for test data.
"""
import openml
import pickle

import openml
from sklearn.model_selection import train_test_split

filename = "openml_ds" + str(dataset_id) + ".pkl"
@@ -93,9 +96,10 @@ def load_openml_task(task_id, data_dir):
y_train: A series of labels for training data.
y_test: A series of labels for test data.
"""
import openml
import pickle

import openml

task = openml.tasks.get_task(task_id)
filename = "openml_task" + str(task_id) + ".pkl"
filepath = os.path.join(data_dir, filename)
@@ -341,8 +345,8 @@ class DataTransformer:
drop = True
else:
drop = False
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer

self.transformer = ColumnTransformer(
[

@@ -2,30 +2,30 @@
# * Copyright (c) FLAML authors. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
import time
from typing import Union, Callable, TypeVar, Optional, Tuple
import logging
import time
from typing import Callable, Optional, Tuple, TypeVar, Union

import numpy as np


from flaml.automl.data import group_counts
from flaml.automl.task.task import Task
from flaml.automl.model import BaseEstimator, TransformersEstimator
from flaml.automl.spark import psDataFrame, psSeries, ERROR as SPARK_ERROR, Series, DataFrame
from flaml.automl.spark import ERROR as SPARK_ERROR
from flaml.automl.spark import DataFrame, Series, psDataFrame, psSeries
from flaml.automl.task.task import Task

try:
from sklearn.metrics import (
mean_squared_error,
r2_score,
roc_auc_score,
accuracy_score,
mean_absolute_error,
log_loss,
average_precision_score,
f1_score,
log_loss,
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_error,
ndcg_score,
r2_score,
roc_auc_score,
)
except ImportError:
pass
@@ -323,7 +323,7 @@ def compute_estimator(
estimator_name: str,
eval_method: str,
eval_metric: Union[str, Callable],
best_val_loss=np.Inf,
best_val_loss=np.inf,
n_jobs: Optional[int] = 1, # some estimators of EstimatorSubclass don't accept n_jobs. Should be None in that case.
estimator_class: Optional[EstimatorSubclass] = None,
cv_score_agg_func: Optional[callable] = None,
@@ -567,14 +567,19 @@ def _eval_estimator(

pred_time = (time.time() - pred_start) / num_val_rows

val_loss = metric_loss_score(
eval_metric,
y_processed_predict=val_pred_y,
y_processed_true=y_val,
labels=labels,
sample_weight=weight_val,
groups=groups_val,
)
try:
val_loss = metric_loss_score(
eval_metric,
y_processed_predict=val_pred_y,
y_processed_true=y_val,
labels=labels,
sample_weight=weight_val,
groups=groups_val,
)
except ValueError as e:
# `r2_score` and other metrics may raise a `ValueError` when a model returns `inf` or `nan` values. In this case, we set the val_loss to infinity.
val_loss = np.inf
logger.warning(f"ValueError {e} happened in `metric_loss_score`, set `val_loss` to `np.inf`")
metric_for_logging = {"pred_time": pred_time}
if log_training_metric:
train_pred_y = get_y_pred(estimator, X_train, eval_metric, task)
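For reference, a hedged illustration of the guard added above: sklearn metrics such as r2_score raise ValueError when predictions contain inf or nan, and the new code maps that to an infinite validation loss instead of crashing the trial (standalone sketch, not part of the diff):

```python
# Hedged sketch: reproducing the failure mode the try/except handles.
import numpy as np
from sklearn.metrics import r2_score

y_true = [1.0, 2.0, 3.0]
y_pred = [1.0, np.inf, 3.0]
try:
    val_loss = 1 - r2_score(y_true, y_pred)
except ValueError:
    val_loss = np.inf  # treated as the worst possible loss
```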
@@ -2,36 +2,43 @@
# * Copyright (c) FLAML authors. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
import logging
import math
import os
import shutil
import signal
import sys
import time
from contextlib import contextmanager
from functools import partial
import signal
import os
from typing import Callable, List, Union

import numpy as np
import time
import logging
import shutil
import sys
import math

from flaml import tune
from flaml.automl.data import (
group_counts,
)
from flaml.automl.task.factory import task_factory
from flaml.automl.task.task import (
Task,
NLG_TASKS,
SEQCLASSIFICATION,
SEQREGRESSION,
TOKENCLASSIFICATION,
SUMMARIZATION,
NLG_TASKS,
TOKENCLASSIFICATION,
Task,
)
from flaml.automl.task.factory import task_factory

try:
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.linear_model import LogisticRegression
from xgboost import __version__ as xgboost_version
except ImportError:
pass

@@ -40,13 +47,14 @@ try:
except ImportError:
pass

from flaml.automl.spark import psDataFrame, sparkDataFrame, psSeries, ERROR as SPARK_ERROR, DataFrame, Series
from flaml.automl.spark.utils import len_labels, to_pandas_on_spark
from flaml.automl.spark import ERROR as SPARK_ERROR
from flaml.automl.spark import DataFrame, Series, psDataFrame, psSeries, sparkDataFrame
from flaml.automl.spark.configs import (
ParamList_LightGBM_Classifier,
ParamList_LightGBM_Regressor,
ParamList_LightGBM_Ranker,
ParamList_LightGBM_Regressor,
)
from flaml.automl.spark.utils import len_labels, to_pandas_on_spark

if DataFrame is not None:
from pandas import to_datetime
@@ -61,10 +69,18 @@ except ImportError:
resource = None

try:
from lightgbm import LGBMClassifier, LGBMRegressor, LGBMRanker
from lightgbm import LGBMClassifier, LGBMRanker, LGBMRegressor
except ImportError:
LGBMClassifier = LGBMRegressor = LGBMRanker = None

xgb_callback = False
try:
from xgboost.callback import TrainingCallback

xgb_callback = True
except ImportError: # for xgboost<1.3
TrainingCallback = object

logger = logging.getLogger("flaml.automl")
# FREE_MEM_RATIO = 0.2

@@ -212,10 +228,10 @@ class BaseEstimator:
model = self.estimator_class(**self.params)
if logger.level == logging.DEBUG:
# xgboost 1.6 doesn't display all the params in the model str
logger.debug(f"flaml.model - {model} fit started with params {self.params}")
logger.debug(f"flaml.automl.model - {model} fit started with params {self.params}")
model.fit(X_train, y_train, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit finished")
logger.debug(f"flaml.automl.model - {model} fit finished")
train_time = time.time() - current_time
self._model = model
return train_time
@@ -319,8 +335,7 @@ class BaseEstimator:
Returns:
The evaluation score on the validation dataset.
"""
from .ml import metric_loss_score
from .ml import is_min_metric
from .ml import is_min_metric, metric_loss_score

if self._model is not None:
if self._task == "rank":
@@ -455,10 +470,10 @@ class SparkEstimator(BaseEstimator):
current_time = time.time()
pipeline_model = self.estimator_class(**self.params, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {pipeline_model} fit started with params {self.params}")
logger.debug(f"flaml.automl.model - {pipeline_model} fit started with params {self.params}")
pipeline_model.fit(df_train)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {pipeline_model} fit finished")
logger.debug(f"flaml.automl.model - {pipeline_model} fit finished")
train_time = time.time() - current_time
self._model = pipeline_model
return train_time
@@ -690,12 +705,12 @@ class SparkLGBMEstimator(SparkEstimator):
current_time = time.time()
model = self.estimator_class(**self.params, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit started with params {self.params}")
logger.debug(f"flaml.automl.model - {model} fit started with params {self.params}")
self._model = model.fit(df_train)
self._model.classes_ = self.model_classes_
self._model.n_classes_ = self.model_n_classes_
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit finished")
logger.debug(f"flaml.automl.model - {model} fit finished")
train_time = time.time() - current_time
return train_time

@@ -758,7 +773,7 @@ class TransformersEstimator(BaseEstimator):
return not self._kwargs.get("gpu_per_trial")

def _set_training_args(self, **kwargs):
from .nlp.utils import date_str, Counter
from .nlp.utils import Counter, date_str

for key, val in kwargs.items():
assert key not in self.params, (
@@ -872,10 +887,10 @@ class TransformersEstimator(BaseEstimator):

@property
def data_collator(self):
from flaml.automl.task.task import Task
from flaml.automl.nlp.huggingface.data_collator import (
task_to_datacollator_class,
)
from flaml.automl.task.task import Task

data_collator_class = task_to_datacollator_class.get(
self._task.name if isinstance(self._task, Task) else self._task
@@ -916,6 +931,7 @@ class TransformersEstimator(BaseEstimator):

from transformers import TrainerCallback
from transformers.trainer_utils import set_seed

from .nlp.huggingface.trainer import TrainerForAuto

try:
@@ -1145,6 +1161,7 @@ class TransformersEstimator(BaseEstimator):
def predict(self, X, **pred_kwargs):
import transformers
from datasets import Dataset

from .nlp.huggingface.utils import postprocess_prediction_and_true

transformers.logging.set_verbosity_error()
@@ -1401,18 +1418,16 @@ class LGBMEstimator(BaseEstimator):
# since xgboost>=1.6.0, callbacks can't be passed in fit()
self.params["callbacks"] = callbacks
callbacks = None
self._fit(
X_train,
y_train,
callbacks=callbacks,
**kwargs,
)
if callbacks is None:
self._fit(X_train, y_train, **kwargs)
else:
self._fit(X_train, y_train, callbacks=callbacks, **kwargs)
if callbacks is None:
# for xgboost>=1.6.0, pop callbacks to enable pickle
callbacks = self.params.pop("callbacks")
self._model.set_params(callbacks=callbacks[:-1])
best_iteration = (
self._model.get_booster().best_iteration
getattr(self._model.get_booster(), "best_iteration", None)
if isinstance(self, XGBoostSklearnEstimator)
else self._model.best_iteration_
)
@@ -1510,8 +1525,6 @@ class XGBoostEstimator(SKLearnEstimator):
# params["booster"] = params.get("booster", "gbtree")

# use_label_encoder is deprecated in 1.7.
from xgboost import __version__ as xgboost_version

if xgboost_version < "1.7.0":
params["use_label_encoder"] = params.get("use_label_encoder", False)
if "n_jobs" in config:
@@ -1559,7 +1572,7 @@ class XGBoostEstimator(SKLearnEstimator):
obj=obj,
callbacks=callbacks,
)
self.params["n_estimators"] = self._model.best_iteration + 1
self.params["n_estimators"] = getattr(self._model, "best_iteration", _n_estimators - 1) + 1
else:
self._model = xgb.train(self.params, dtrain, _n_estimators, obj=obj)
self.params["n_estimators"] = _n_estimators
@@ -1620,7 +1633,9 @@ class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
if max_depth == 0:
params["grow_policy"] = params.get("grow_policy", "lossguide")
params["tree_method"] = params.get("tree_method", "hist")
params["use_label_encoder"] = params.get("use_label_encoder", False)
# use_label_encoder is deprecated in 1.7.
if xgboost_version < "1.7.0":
params["use_label_encoder"] = params.get("use_label_encoder", False)
return params

def __init__(
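For reference, a hedged standalone sketch of the version gate added above: use_label_encoder is only passed to xgboost < 1.7, where the parameter still exists (the string comparison mirrors the diff):

```python
# Hedged sketch: set use_label_encoder only for older xgboost versions,
# as the hunks above do inside config2params.
from xgboost import __version__ as xgboost_version

params = {}
if xgboost_version < "1.7.0":
    params["use_label_encoder"] = params.get("use_label_encoder", False)
```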
@@ -4,16 +4,15 @@ This directory contains utility functions used by AutoNLP. Currently we support
|
||||
|
||||
Please refer to this [link](https://microsoft.github.io/FLAML/docs/Examples/AutoML-NLP) for examples.
|
||||
|
||||
|
||||
# Troubleshooting fine-tuning HPO for pre-trained language models
|
||||
|
||||
The frequent updates of transformers may lead to fluctuations in the results of tuning. To help users quickly troubleshoot the result of AutoNLP when a tuning failure occurs (e.g., failing to reproduce previous results), we have provided the following jupyter notebook:
|
||||
|
||||
* [Troubleshooting HPO for fine-tuning pre-trained language models](https://github.com/microsoft/FLAML/blob/main/notebook/research/acl2021.ipynb)
|
||||
- [Troubleshooting HPO for fine-tuning pre-trained language models](https://github.com/microsoft/FLAML/blob/main/notebook/research/acl2021.ipynb)
|
||||
|
||||
Our findings on troubleshooting fine-tuning the Electra and RoBERTa model for the GLUE dataset can be seen in the following paper published in ACL 2021:
|
||||
|
||||
* [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://arxiv.org/abs/2106.09204). Xueqing Liu, Chi Wang. ACL-IJCNLP 2021.
|
||||
- [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://arxiv.org/abs/2106.09204). Xueqing Liu, Chi Wang. ACL-IJCNLP 2021.
|
||||
|
||||
```bibtex
|
||||
@inproceedings{liu2021hpo,
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
from dataclasses import dataclass
|
||||
from transformers.data.data_collator import (
|
||||
DataCollatorWithPadding,
|
||||
DataCollatorForTokenClassification,
|
||||
DataCollatorForSeq2Seq,
|
||||
)
|
||||
from collections import OrderedDict
|
||||
from dataclasses import dataclass
|
||||
|
||||
from transformers.data.data_collator import (
|
||||
DataCollatorForSeq2Seq,
|
||||
DataCollatorForTokenClassification,
|
||||
DataCollatorWithPadding,
|
||||
)
|
||||
|
||||
from flaml.automl.task.task import (
|
||||
TOKENCLASSIFICATION,
|
||||
MULTICHOICECLASSIFICATION,
|
||||
SUMMARIZATION,
|
||||
SEQCLASSIFICATION,
|
||||
SEQREGRESSION,
|
||||
SUMMARIZATION,
|
||||
TOKENCLASSIFICATION,
|
||||
)
|
||||
|
||||
|
||||
@@ -19,6 +20,7 @@ from flaml.automl.task.task import (
|
||||
class DataCollatorForMultipleChoiceClassification(DataCollatorWithPadding):
|
||||
def __call__(self, features):
|
||||
from itertools import chain
|
||||
|
||||
import torch
|
||||
|
||||
label_name = "label" if "label" in features[0].keys() else "labels"
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import argparse
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, List
|
||||
from typing import List, Optional
|
||||
|
||||
from flaml.automl.task.task import NLG_TASKS
|
||||
|
||||
try:
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
from itertools import chain
|
||||
|
||||
import numpy as np
|
||||
from flaml.automl.task.task import (
|
||||
SUMMARIZATION,
|
||||
SEQREGRESSION,
|
||||
SEQCLASSIFICATION,
|
||||
MULTICHOICECLASSIFICATION,
|
||||
TOKENCLASSIFICATION,
|
||||
NLG_TASKS,
|
||||
)
|
||||
|
||||
from flaml.automl.data import pd
|
||||
from flaml.automl.task.task import (
|
||||
MULTICHOICECLASSIFICATION,
|
||||
NLG_TASKS,
|
||||
SEQCLASSIFICATION,
|
||||
SEQREGRESSION,
|
||||
SUMMARIZATION,
|
||||
TOKENCLASSIFICATION,
|
||||
)
|
||||
|
||||
|
||||
def todf(X, Y, column_name):
|
||||
@@ -377,6 +379,7 @@ def load_model(checkpoint_path, task, num_labels=None):
|
||||
transformers.logging.set_verbosity_error()
|
||||
|
||||
from transformers import AutoConfig
|
||||
|
||||
from flaml.automl.task.task import (
|
||||
SEQCLASSIFICATION,
|
||||
SEQREGRESSION,
|
||||
@@ -384,10 +387,12 @@ def load_model(checkpoint_path, task, num_labels=None):
|
||||
)
|
||||
|
||||
def get_this_model(checkpoint_path, task, model_config):
|
||||
from transformers import AutoModelForSequenceClassification
|
||||
from transformers import AutoModelForSeq2SeqLM
|
||||
from transformers import AutoModelForMultipleChoice
|
||||
from transformers import AutoModelForTokenClassification
|
||||
from transformers import (
|
||||
AutoModelForMultipleChoice,
|
||||
AutoModelForSeq2SeqLM,
|
||||
AutoModelForSequenceClassification,
|
||||
AutoModelForTokenClassification,
|
||||
)
|
||||
|
||||
if task in (SEQCLASSIFICATION, SEQREGRESSION):
|
||||
return AutoModelForSequenceClassification.from_pretrained(
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
from typing import Dict, Any
|
||||
from typing import Any, Dict
|
||||
|
||||
import numpy as np
|
||||
|
||||
from flaml.automl.task.task import (
|
||||
SUMMARIZATION,
|
||||
SEQREGRESSION,
|
||||
SEQCLASSIFICATION,
|
||||
MULTICHOICECLASSIFICATION,
|
||||
SEQCLASSIFICATION,
|
||||
SEQREGRESSION,
|
||||
SUMMARIZATION,
|
||||
TOKENCLASSIFICATION,
|
||||
)
|
||||
|
||||
|
||||
@@ -6,8 +6,10 @@ try:
|
||||
import pyspark.pandas as ps
|
||||
import pyspark.sql.functions as F
|
||||
import pyspark.sql.types as T
|
||||
from pyspark.pandas import DataFrame as psDataFrame
|
||||
from pyspark.pandas import Series as psSeries
|
||||
from pyspark.pandas import set_option
|
||||
from pyspark.sql import DataFrame as sparkDataFrame
|
||||
from pyspark.pandas import DataFrame as psDataFrame, Series as psSeries, set_option
|
||||
from pyspark.util import VersionUtils
|
||||
except ImportError:
|
||||
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
import numpy as np
|
||||
from typing import Union
|
||||
from flaml.automl.spark import psSeries, F
|
||||
|
||||
import numpy as np
|
||||
from pyspark.ml.evaluation import (
|
||||
BinaryClassificationEvaluator,
|
||||
RegressionEvaluator,
|
||||
MulticlassClassificationEvaluator,
|
||||
MultilabelClassificationEvaluator,
|
||||
RankingEvaluator,
|
||||
RegressionEvaluator,
|
||||
)
|
||||
|
||||
from flaml.automl.spark import F, psSeries
|
||||
|
||||
|
||||
def ps_group_counts(groups: Union[psSeries, np.ndarray]) -> np.ndarray:
|
||||
if isinstance(groups, np.ndarray):
|
||||
|
||||
@@ -1,17 +1,19 @@
|
||||
import logging
|
||||
from typing import Union, List, Optional, Tuple
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from flaml.automl.spark import (
|
||||
sparkDataFrame,
|
||||
ps,
|
||||
DataFrame,
|
||||
F,
|
||||
Series,
|
||||
T,
|
||||
_spark_major_minor_version,
|
||||
ps,
|
||||
psDataFrame,
|
||||
psSeries,
|
||||
_spark_major_minor_version,
|
||||
DataFrame,
|
||||
Series,
|
||||
set_option,
|
||||
sparkDataFrame,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
import inspect
|
||||
import copy
|
||||
import inspect
|
||||
import time
|
||||
from typing import Any, Optional
|
||||
|
||||
import numpy as np
|
||||
|
||||
from flaml import tune
|
||||
from flaml.automl.logger import logger
|
||||
from flaml.automl.ml import compute_estimator, train_estimator
|
||||
from flaml.automl.spark import DataFrame, Series, psDataFrame, psSeries
|
||||
from flaml.automl.time_series.ts_data import TimeSeriesDataset
|
||||
from flaml.automl.spark import psDataFrame, psSeries, DataFrame, Series
|
||||
|
||||
|
||||
class SearchState:
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
from typing import Optional, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from flaml.automl.data import DataFrame, Series
|
||||
from flaml.automl.task.task import Task, TS_FORECAST
|
||||
from flaml.automl.task.task import TS_FORECAST, Task
|
||||
|
||||
|
||||
def task_factory(
|
||||
|
||||
@@ -1,43 +1,44 @@
|
||||
import logging
|
||||
import time
|
||||
from typing import List, Optional
|
||||
import numpy as np
|
||||
from flaml.automl.data import TS_TIMESTAMP_COL, concat
|
||||
from flaml.automl.ml import EstimatorSubclass, get_val_loss, default_cv_score_agg_func
|
||||
|
||||
from flaml.automl.task.task import (
|
||||
Task,
|
||||
get_classification_objective,
|
||||
TS_FORECAST,
|
||||
TS_FORECASTPANEL,
|
||||
)
|
||||
from flaml.config import RANDOM_SEED
|
||||
from flaml.automl.spark import ps, psDataFrame, psSeries, pd
|
||||
import numpy as np
|
||||
|
||||
from flaml.automl.data import TS_TIMESTAMP_COL, concat
|
||||
from flaml.automl.ml import EstimatorSubclass, default_cv_score_agg_func, get_val_loss
|
||||
from flaml.automl.spark import pd, ps, psDataFrame, psSeries
|
||||
from flaml.automl.spark.utils import (
|
||||
iloc_pandas_on_spark,
|
||||
len_labels,
|
||||
set_option,
|
||||
spark_kFold,
|
||||
train_test_split_pyspark,
|
||||
unique_pandas_on_spark,
|
||||
unique_value_first_index,
|
||||
len_labels,
|
||||
set_option,
|
||||
)
|
||||
from flaml.automl.task.task import (
|
||||
TS_FORECAST,
|
||||
TS_FORECASTPANEL,
|
||||
Task,
|
||||
get_classification_objective,
|
||||
)
|
||||
from flaml.config import RANDOM_SEED
|
||||
|
||||
try:
|
||||
from scipy.sparse import issparse
|
||||
except ImportError:
|
||||
pass
|
||||
try:
|
||||
from sklearn.utils import shuffle
|
||||
from sklearn.model_selection import (
|
||||
train_test_split,
|
||||
RepeatedStratifiedKFold,
|
||||
RepeatedKFold,
|
||||
GroupKFold,
|
||||
TimeSeriesSplit,
|
||||
GroupShuffleSplit,
|
||||
RepeatedKFold,
|
||||
RepeatedStratifiedKFold,
|
||||
StratifiedGroupKFold,
|
||||
TimeSeriesSplit,
|
||||
train_test_split,
|
||||
)
|
||||
from sklearn.utils import shuffle
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
@@ -49,19 +50,20 @@ class GenericTask(Task):
|
||||
def estimators(self):
|
||||
if self._estimators is None:
|
||||
# put this into a function to avoid circular dependency
|
||||
from flaml.automl.contrib.histgb import HistGradientBoostingEstimator
|
||||
from flaml.automl.model import (
|
||||
XGBoostSklearnEstimator,
|
||||
XGBoostLimitDepthEstimator,
|
||||
RandomForestEstimator,
|
||||
LGBMEstimator,
|
||||
LRL1Classifier,
|
||||
LRL2Classifier,
|
||||
CatBoostEstimator,
|
||||
ExtraTreesEstimator,
|
||||
KNeighborsEstimator,
|
||||
LGBMEstimator,
|
||||
LRL1Classifier,
|
||||
LRL2Classifier,
|
||||
RandomForestEstimator,
|
||||
SparkLGBMEstimator,
|
||||
TransformersEstimator,
|
||||
TransformersEstimatorModelSelection,
|
||||
SparkLGBMEstimator,
|
||||
XGBoostLimitDepthEstimator,
|
||||
XGBoostSklearnEstimator,
|
||||
)
|
||||
|
||||
self._estimators = {
|
||||
@@ -77,6 +79,7 @@ class GenericTask(Task):
|
||||
"kneighbor": KNeighborsEstimator,
|
||||
"transformer": TransformersEstimator,
|
||||
"transformer_ms": TransformersEstimatorModelSelection,
|
||||
"histgb": HistGradientBoostingEstimator,
|
||||
}
|
||||
return self._estimators
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from flaml.automl.data import DataFrame, Series, psDataFrame, psSeries

if TYPE_CHECKING:

@@ -2,26 +2,25 @@ import logging
import time
from typing import List

import pandas as pd
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from sklearn.model_selection import (
    GroupKFold,
    TimeSeriesSplit,
)

from flaml.automl.ml import get_val_loss, default_cv_score_agg_func
from flaml.automl.time_series.ts_data import (
    TimeSeriesDataset,
    DataTransformerTS,
    normalize_ts_data,
)

from flaml.automl.ml import default_cv_score_agg_func, get_val_loss
from flaml.automl.task.task import (
    Task,
    get_classification_objective,
    TS_FORECAST,
    TS_FORECASTPANEL,
    Task,
    get_classification_objective,
)
from flaml.automl.time_series.ts_data import (
    DataTransformerTS,
    TimeSeriesDataset,
    normalize_ts_data,
)

logger = logging.getLogger(__name__)

@@ -33,18 +32,18 @@ class TimeSeriesTask(Task):
        if self._estimators is None:
            # put this into a function to avoid circular dependency
            from flaml.automl.time_series import (
                ARIMA,
                LGBM_TS,
                RF_TS,
                SARIMAX,
                CatBoost_TS,
                ExtraTrees_TS,
                HoltWinters,
                Orbit,
                Prophet,
                TemporalFusionTransformerEstimator,
                XGBoost_TS,
                XGBoostLimitDepth_TS,
                RF_TS,
                LGBM_TS,
                ExtraTrees_TS,
                CatBoost_TS,
                Prophet,
                Orbit,
                ARIMA,
                SARIMAX,
                TemporalFusionTransformerEstimator,
                HoltWinters,
            )

            self._estimators = {

@@ -1,17 +1,16 @@
from .ts_model import (
    Prophet,
    Orbit,
    ARIMA,
    SARIMAX,
    HoltWinters,
    LGBM_TS,
    XGBoost_TS,
    RF_TS,
    ExtraTrees_TS,
    XGBoostLimitDepth_TS,
    CatBoost_TS,
    TimeSeriesEstimator,
)
from .tft import TemporalFusionTransformerEstimator

from .ts_data import TimeSeriesDataset
from .ts_model import (
    ARIMA,
    LGBM_TS,
    RF_TS,
    SARIMAX,
    CatBoost_TS,
    ExtraTrees_TS,
    HoltWinters,
    Orbit,
    Prophet,
    TimeSeriesEstimator,
    XGBoost_TS,
    XGBoostLimitDepth_TS,
)

@@ -1,5 +1,5 @@
import math
import datetime
import math
from functools import lru_cache

import pandas as pd

@@ -12,8 +12,8 @@ except ImportError:
    DataFrame = Series = None

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler


def make_lag_features(X: pd.DataFrame, y: pd.Series, lags: int):

@@ -105,6 +105,7 @@ class TemporalFusionTransformerEstimator(TimeSeriesEstimator):

    def fit(self, X_train, y_train, budget=None, **kwargs):
        import warnings

        import pytorch_lightning as pl
        import torch
        from pytorch_forecasting import TemporalFusionTransformer

@@ -2,7 +2,7 @@ import copy
import datetime
import math
from dataclasses import dataclass, field
from typing import List, Optional, Callable, Dict, Generator, Union
from typing import Callable, Dict, Generator, List, Optional, Union

import numpy as np

@@ -10,9 +10,9 @@ try:
    import pandas as pd
    from pandas import DataFrame, Series, to_datetime
    from scipy.sparse import issparse
    from sklearn.preprocessing import LabelEncoder
    from sklearn.impute import SimpleImputer
    from sklearn.compose import ColumnTransformer
    from sklearn.impute import SimpleImputer
    from sklearn.preprocessing import LabelEncoder

    from .feature import monthly_fourier_features
except ImportError:

@@ -26,6 +26,8 @@ except ImportError:
    DataFrame = Series = None


# dataclass will remove empty default value even with field(default_factory=lambda: [])
# Change into default=None to place the attr
@dataclass
class TimeSeriesDataset:
    train_data: pd.DataFrame

@@ -34,10 +36,10 @@ class TimeSeriesDataset:
    target_names: List[str]
    frequency: str
    test_data: pd.DataFrame
    time_varying_known_categoricals: List[str] = field(default_factory=lambda: [])
    time_varying_known_reals: List[str] = field(default_factory=lambda: [])
    time_varying_unknown_categoricals: List[str] = field(default_factory=lambda: [])
    time_varying_unknown_reals: List[str] = field(default_factory=lambda: [])
    time_varying_known_categoricals: List[str] = field(default=None)
    time_varying_known_reals: List[str] = field(default=None)
    time_varying_unknown_categoricals: List[str] = field(default=None)
    time_varying_unknown_reals: List[str] = field(default=None)

    def __init__(
        self,
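
The `field(default_factory=lambda: [])` → `field(default=None)` switch in this hunk follows the comment above it: when a dataclass also defines its own `__init__`, the generated `__init__` that would call the factory is never used, so a `default_factory` field never materializes as an attribute at all, while a plain default becomes a readable class attribute. A minimal sketch of that behavior (illustration only, not part of the diff):

```python
# Hedged sketch: why `default=None` is used when the dataclass has a
# hand-written __init__. `WithFactory` and `WithDefault` are toy classes.
from dataclasses import dataclass, field
from typing import List


@dataclass
class WithFactory:
    xs: List[str] = field(default_factory=list)

    def __init__(self):  # custom __init__ bypasses the generated one
        pass


@dataclass
class WithDefault:
    xs: List[str] = field(default=None)

    def __init__(self):
        pass


print(WithDefault().xs)  # None: the default is placed on the class
try:
    WithFactory().xs
except AttributeError as e:
    print("factory default was never applied:", e)
```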

@@ -403,7 +405,7 @@ class DataTransformerTS:
                self.cat_columns.append(column)
            elif X[column].nunique(dropna=True) < 2:
                self.drop_columns.append(column)
            elif X[column].dtype.name == "datetime64[ns]":
            elif X[column].dtype.name in ["datetime64[ns]", "datetime64[s]"]:
                pass  # these will be processed at model level,
                # so they can also be done in the predict method
            else:
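
The widened dtype check above appears to account for pandas 2.x, which supports non-nanosecond datetime resolutions, so a datetime column may report `datetime64[s]` rather than `datetime64[ns]`. A small illustration (an assumption based on pandas 2.x behavior, not part of the diff):

```python
# Hedged sketch: an exact comparison against "datetime64[ns]" misses
# datetime columns stored at second resolution (pandas 2.x).
import pandas as pd

s = pd.Series(pd.to_datetime(["2023-01-01", "2023-06-01"])).astype("datetime64[s]")
print(s.dtype.name)  # datetime64[s]
print(s.dtype.name in ["datetime64[ns]", "datetime64[s]"])  # True
```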

@@ -1,8 +1,8 @@
import time
import logging
import os
from datetime import datetime
import math
import os
import time
from datetime import datetime
from typing import List, Optional, Union

try:

@@ -22,26 +22,26 @@ except ImportError:
import numpy as np

from flaml import tune
from flaml.model import (
    suppress_stdout_stderr,
    SKLearnEstimator,
    logger,
    LGBMEstimator,
    XGBoostSklearnEstimator,
    RandomForestEstimator,
    ExtraTreesEstimator,
    XGBoostLimitDepthEstimator,
from flaml.automl.data import TS_TIMESTAMP_COL, TS_VALUE_COL
from flaml.automl.model import (
    CatBoostEstimator,
)
from flaml.data import TS_TIMESTAMP_COL, TS_VALUE_COL
from flaml.automl.time_series.ts_data import (
    TimeSeriesDataset,
    enrich_dataset,
    enrich_dataframe,
    normalize_ts_data,
    create_forward_frame,
    ExtraTreesEstimator,
    LGBMEstimator,
    RandomForestEstimator,
    SKLearnEstimator,
    XGBoostLimitDepthEstimator,
    XGBoostSklearnEstimator,
    logger,
    suppress_stdout_stderr,
)
from flaml.automl.task import Task
from flaml.automl.time_series.ts_data import (
    TimeSeriesDataset,
    create_forward_frame,
    enrich_dataframe,
    enrich_dataset,
    normalize_ts_data,
)


class TimeSeriesEstimator(SKLearnEstimator):

@@ -143,6 +143,7 @@ class TimeSeriesEstimator(SKLearnEstimator):

    def score(self, X_val: DataFrame, y_val: Series, **kwargs):
        from sklearn.metrics import r2_score

        from ..ml import metric_loss_score

        y_pred = self.predict(X_val, **kwargs)

@@ -610,15 +611,13 @@ class HoltWinters(StatsModelsEstimator):
        ):  # this would prevent heuristic initialization to work properly
            self.params["seasonal"] = None
        if (
            self.params["seasonal"] == "mul" and (train_df.y == 0).sum() > 0
            self.params["seasonal"] == "mul" and (train_df[target_col] == 0).sum() > 0
        ):  # cannot have multiplicative seasonality in this case
            self.params["seasonal"] = "add"
        if self.params["trend"] == "mul" and (train_df.y == 0).sum() > 0:
        if self.params["trend"] == "mul" and (train_df[target_col] == 0).sum() > 0:
            self.params["trend"] = "add"

        if not self.params["seasonal"] or self.params["trend"] not in ["mul", "add"]:
            self.params["damped_trend"] = False

        model = HWExponentialSmoothing(
            train_df[[target_col]],
            damped_trend=self.params["damped_trend"],
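
The `train_df.y` → `train_df[target_col]` change above removes a hard-coded column name: attribute access only works when the target column happens to be called `y`. A toy illustration (not part of the diff):

```python
# Hedged sketch: attribute access breaks for any target not named "y";
# indexing by the actual target column works for every name.
import pandas as pd

train_df = pd.DataFrame({"sales": [3.0, 0.0, 5.0]})
target_col = "sales"

# train_df.y would raise AttributeError here; indexing does not:
print((train_df[target_col] == 0).sum() > 0)  # True: zeros present
```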

@@ -4,9 +4,9 @@
"""

import json
from typing import IO
from contextlib import contextmanager
import logging
from contextlib import contextmanager
from typing import IO

logger = logging.getLogger("flaml.automl")

@@ -1,9 +0,0 @@
import warnings

from flaml.automl.data import *


warnings.warn(
    "Importing from `flaml.data` is deprecated. Please use `flaml.automl.data`.",
    DeprecationWarning,
)

@@ -14,7 +14,6 @@ estimator.fit(X_train, y_train)
estimator.predict(X_test, y_test)
```

1. Use AutoML.fit(). set `starting_points="data"` and `max_iter=0`.

```python

@@ -36,10 +35,17 @@ automl.fit(X_train, y_train, **automl_settings)
from flaml.default import preprocess_and_suggest_hyperparams

X, y = load_iris(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
hyperparams, estimator_class, X_transformed, y_transformed, feature_transformer, label_transformer = preprocess_and_suggest_hyperparams(
    "classification", X_train, y_train, "lgbm"
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42
)
(
    hyperparams,
    estimator_class,
    X_transformed,
    y_transformed,
    feature_transformer,
    label_transformer,
) = preprocess_and_suggest_hyperparams("classification", X_train, y_train, "lgbm")
model = estimator_class(**hyperparams)  # estimator_class is LGBMClassifier
model.fit(X_transformed, y_train)  # LGBMClassifier can handle raw labels
X_test = feature_transformer.transform(X_test)  # preprocess test data

@@ -172,7 +178,7 @@ Change "binary" into "multiclass" or "regression" for the other tasks.

For more technical details, please check our research paper.

* [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. arXiv preprint arXiv:2202.09927 (2022).
- [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. arXiv preprint arXiv:2202.09927 (2022).

```bibtex
@article{Kayali2022default,

@@ -1,18 +1,18 @@
from .suggest import (
    suggest_config,
    suggest_learner,
    suggest_hyperparams,
    preprocess_and_suggest_hyperparams,
    meta_feature,
)
from .estimator import (
    flamlize_estimator,
    LGBMClassifier,
    LGBMRegressor,
    XGBClassifier,
    XGBRegressor,
    RandomForestClassifier,
    RandomForestRegressor,
    ExtraTreesClassifier,
    ExtraTreesRegressor,
    LGBMClassifier,
    LGBMRegressor,
    RandomForestClassifier,
    RandomForestRegressor,
    XGBClassifier,
    XGBRegressor,
    flamlize_estimator,
)
from .suggest import (
    meta_feature,
    preprocess_and_suggest_hyperparams,
    suggest_config,
    suggest_hyperparams,
    suggest_learner,
)

@@ -1,5 +1,7 @@
from functools import wraps

from flaml.automl.task.task import CLASSIFICATION

from .suggest import preprocess_and_suggest_hyperparams

DEFAULT_LOCATION = "default_location"

@@ -105,7 +107,12 @@ def flamlize_estimator(super_class, name: str, task: str, alternatives=None):
            # if hasattr(self, "_classes"):
            #     self._classes = self._label_transformer.classes_
            # else:
            self.classes_ = self._label_transformer.classes_
            try:
                self.classes_ = self._label_transformer.classes_
            except AttributeError:
                # xgboost 2: AttributeError: can't set attribute
                if "xgb" not in estimator_name:
                    raise
            if "xgb" not in estimator_name:
                # rf and et would do inverse transform automatically; xgb doesn't
                self._label_transformer = None
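
The try/except added above guards against xgboost 2.x, where `classes_` is exposed as a read-only property on the sklearn wrappers, so plain assignment raises `AttributeError`. The pattern in isolation (illustration only; `set_classes` is a hypothetical helper, not FLAML code):

```python
# Hedged sketch: tolerate the read-only classes_ attribute only for
# xgboost estimators; anything else re-raises as a real bug.
def set_classes(est, classes, estimator_name):
    try:
        est.classes_ = classes
    except AttributeError:
        # xgboost 2: "can't set attribute"
        if "xgb" not in estimator_name:
            raise
```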

@@ -1,7 +1,7 @@
import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import RobustScaler


def _augment(row):

@@ -12,7 +12,7 @@ def _augment(row):
def construct_portfolio(regret_matrix, meta_features, regret_bound):
    """The portfolio construction algorithm.

    (Reference)[https://arxiv.org/abs/2202.09927].
    Reference: [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927).

    Args:
        regret_matrix: A dataframe of regret matrix.

@@ -1,11 +1,13 @@
import pandas as pd
import numpy as np
import argparse
from pathlib import Path
import json
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler

from flaml.default import greedy
from flaml.default.regret import load_result, build_regret
from flaml.default.regret import build_regret, load_result
from flaml.version import __version__

regret_bound = 0.01

@@ -1,5 +1,6 @@
import argparse
from os import path

import pandas as pd


@@ -1,11 +1,13 @@
import numpy as np
import json
import logging
import pathlib
import json

import numpy as np

from flaml.automl.data import DataTransformer
from flaml.automl.task.task import CLASSIFICATION, get_classification_objective
from flaml.automl.task.generic_task import len_labels
from flaml.automl.task.factory import task_factory
from flaml.automl.task.generic_task import len_labels
from flaml.automl.task.task import CLASSIFICATION, get_classification_objective
from flaml.version import __version__

try:

@@ -2,7 +2,6 @@ import warnings

from flaml.automl.ml import *


warnings.warn(
    "Importing from `flaml.ml` is deprecated. Please use `flaml.automl.ml`.",
    DeprecationWarning,

@@ -1,9 +0,0 @@
import warnings

from flaml.automl.model import *


warnings.warn(
    "Importing from `flaml.model` is deprecated. Please use `flaml.automl.model`.",
    DeprecationWarning,
)

@@ -4,7 +4,8 @@ FLAML includes *ChaCha* which is an automatic hyperparameter tuning solution for

For more technical details about *ChaCha*, please check our paper.

* [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.
- [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.

```
@inproceedings{wu2021chacha,
    title={ChaCha for online AutoML},

@@ -23,8 +24,9 @@ An example of online namespace interactions tuning in VW:
```python
# require: pip install flaml[vw]
from flaml import AutoVW
'''create an AutoVW instance for tuning namespace interactions'''
autovw = AutoVW(max_live_model_num=5, search_space={'interactions': AutoVW.AUTOMATIC})

"""create an AutoVW instance for tuning namespace interactions"""
autovw = AutoVW(max_live_model_num=5, search_space={"interactions": AutoVW.AUTOMATIC})
```

An example of online tuning of both namespace interactions and learning rate in VW:

@@ -33,12 +35,18 @@ An example of online tuning of both namespace interactions and learning rate in
# require: pip install flaml[vw]
from flaml import AutoVW
from flaml.tune import loguniform
''' create an AutoVW instance for tuning namespace interactions and learning rate'''

""" create an AutoVW instance for tuning namespace interactions and learning rate"""
# set up the search space and init config
search_space_nilr = {'interactions': AutoVW.AUTOMATIC, 'learning_rate': loguniform(lower=2e-10, upper=1.0)}
init_config_nilr = {'interactions': set(), 'learning_rate': 0.5}
search_space_nilr = {
    "interactions": AutoVW.AUTOMATIC,
    "learning_rate": loguniform(lower=2e-10, upper=1.0),
}
init_config_nilr = {"interactions": set(), "learning_rate": 0.5}
# create an AutoVW instance
autovw = AutoVW(max_live_model_num=5, search_space=search_space_nilr, init_config=init_config_nilr)
autovw = AutoVW(
    max_live_model_num=5, search_space=search_space_nilr, init_config=init_config_nilr
)
```

A user can use the resulting AutoVW instances `autovw` in a similar way to a vanilla Vowpal Wabbit instance, i.e., `pyvw.vw`, to perform online learning by iteratively calling its `predict(data_example)` and `learn(data_example)` functions at each data example.
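
A minimal sketch of that predict-then-learn loop (illustration only; it assumes `pip install flaml[vw]` and that the toy strings below are valid VW-format examples):

```python
# Hedged sketch of the online loop described above.
from flaml import AutoVW

autovw = AutoVW(max_live_model_num=5, search_space={"interactions": AutoVW.AUTOMATIC})
examples = ["0.5 |f a:1 b:2", "1.0 |f a:0 b:1"]  # toy VW examples (assumption)
for data_example in examples:
    prediction = autovw.predict(data_example)  # predict before learning
    autovw.learn(data_example)                 # then update the live models
```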

@@ -1,16 +1,17 @@
from typing import Optional, Union
import logging
from typing import Optional, Union

from flaml.onlineml import OnlineTrialRunner
from flaml.onlineml.trial import get_ns_feature_dim_from_vw_example
from flaml.tune import (
    Trial,
    Categorical,
    Float,
    PolynomialExpansionSet,
    Trial,
    polynomial_expansion_set,
)
from flaml.onlineml import OnlineTrialRunner
from flaml.tune.scheduler import ChaChaScheduler
from flaml.tune.searcher import ChampionFrontierSearcher
from flaml.onlineml.trial import get_ns_feature_dim_from_vw_example

logger = logging.getLogger(__name__)

@@ -140,7 +141,7 @@ class AutoVW:
            max_live_model_num=self._max_live_model_num,
            searcher=searcher,
            scheduler=scheduler,
            **self._automl_runner_args
            **self._automl_runner_args,
        )

    def predict(self, data_sample):

@@ -1,14 +1,16 @@
import numpy as np
import logging
import time
import math
import copy
import collections
import copy
import logging
import math
import time
from typing import Optional, Union

import numpy as np

from flaml.tune import Trial

try:
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    from sklearn.metrics import mean_absolute_error, mean_squared_error
except ImportError:
    pass


@@ -1,10 +1,11 @@
import numpy as np
import logging
import math

import numpy as np

from flaml.tune import Trial
from flaml.tune.scheduler import TrialScheduler

import logging

logger = logging.getLogger(__name__)
@@ -5,45 +5,47 @@ It can be used standalone, or together with ray tune or nni. Please find detaile

Below are some quick examples.

* Example for sequential tuning (recommended when compute resource is limited and each trial can consume all the resources):
- Example for sequential tuning (recommended when compute resource is limited and each trial can consume all the resources):

```python
# require: pip install flaml[blendsearch]
from flaml import tune
import time


def evaluate_config(config):
    '''evaluate a hyperparameter configuration'''
    """evaluate a hyperparameter configuration"""
    # we use a toy example with 2 hyperparameters
    metric = (round(config['x'])-85000)**2 - config['x']/config['y']
    metric = (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]
    # usually the evaluation takes a non-negligible cost
    # and the cost could be related to certain hyperparameters
    # in this example, we assume it's proportional to x
    time.sleep(config['x']/100000)
    time.sleep(config["x"] / 100000)
    # use tune.report to report the metric to optimize
    tune.report(metric=metric)


analysis = tune.run(
    evaluate_config,  # the function to evaluate a config
    evaluate_config,  # the function to evaluate a config
    config={
        'x': tune.lograndint(lower=1, upper=100000),
        'y': tune.randint(lower=1, upper=100000)
    },  # the search space
    low_cost_partial_config={'x':1},  # an initial (partial) config with low cost
    metric='metric',  # the name of the metric used for optimization
    mode='min',  # the optimization mode, 'min' or 'max'
    num_samples=-1,  # the maximal number of configs to try, -1 means infinite
    time_budget_s=60,  # the time budget in seconds
    local_dir='logs/',  # the local directory to store logs
        "x": tune.lograndint(lower=1, upper=100000),
        "y": tune.randint(lower=1, upper=100000),
    },  # the search space
    low_cost_partial_config={"x": 1},  # an initial (partial) config with low cost
    metric="metric",  # the name of the metric used for optimization
    mode="min",  # the optimization mode, 'min' or 'max'
    num_samples=-1,  # the maximal number of configs to try, -1 means infinite
    time_budget_s=60,  # the time budget in seconds
    local_dir="logs/",  # the local directory to store logs
    # verbose=0,  # verbosity
    # use_ray=True,  # uncomment when performing parallel tuning using ray
)
)

print(analysis.best_trial.last_result)  # the best trial's result
print(analysis.best_config)  # the best config
print(analysis.best_config)  # the best config
```

* Example for using ray tune's API:
- Example for using ray tune's API:

```python
# require: pip install flaml[blendsearch,ray]

@@ -51,36 +53,39 @@ from ray import tune as raytune
from flaml import CFO, BlendSearch
import time


def evaluate_config(config):
    '''evaluate a hyperparameter configuration'''
    """evaluate a hyperparameter configuration"""
    # we use a toy example with 2 hyperparameters
    metric = (round(config['x'])-85000)**2 - config['x']/config['y']
    metric = (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]
    # usually the evaluation takes a non-negligible cost
    # and the cost could be related to certain hyperparameters
    # in this example, we assume it's proportional to x
    time.sleep(config['x']/100000)
    time.sleep(config["x"] / 100000)
    # use tune.report to report the metric to optimize
    tune.report(metric=metric)


# provide a time budget (in seconds) for the tuning process
time_budget_s = 60
# provide the search space
config_search_space = {
    'x': tune.lograndint(lower=1, upper=100000),
    'y': tune.randint(lower=1, upper=100000)
}
    "x": tune.lograndint(lower=1, upper=100000),
    "y": tune.randint(lower=1, upper=100000),
}
# provide the low cost partial config
low_cost_partial_config={'x':1}
low_cost_partial_config = {"x": 1}

# set up CFO
cfo = CFO(low_cost_partial_config=low_cost_partial_config)

# set up BlendSearch
blendsearch = BlendSearch(
    metric="metric", mode="min",
    metric="metric",
    mode="min",
    space=config_search_space,
    low_cost_partial_config=low_cost_partial_config,
    time_budget_s=time_budget_s
    time_budget_s=time_budget_s,
)
# NOTE: when using BlendSearch as a search_alg in ray tune, you need to
# configure the 'time_budget_s' for BlendSearch accordingly such that

@@ -89,28 +94,28 @@ blendsearch = BlendSearch(
# automatically in flaml.

analysis = raytune.run(
    evaluate_config,  # the function to evaluate a config
    evaluate_config,  # the function to evaluate a config
    config=config_search_space,
    metric='metric',  # the name of the metric used for optimization
    mode='min',  # the optimization mode, 'min' or 'max'
    num_samples=-1,  # the maximal number of configs to try, -1 means infinite
    time_budget_s=time_budget_s,  # the time budget in seconds
    local_dir='logs/',  # the local directory to store logs
    search_alg=blendsearch  # or cfo
    metric="metric",  # the name of the metric used for optimization
    mode="min",  # the optimization mode, 'min' or 'max'
    num_samples=-1,  # the maximal number of configs to try, -1 means infinite
    time_budget_s=time_budget_s,  # the time budget in seconds
    local_dir="logs/",  # the local directory to store logs
    search_alg=blendsearch,  # or cfo
)

print(analysis.best_trial.last_result)  # the best trial's result
print(analysis.best_config)  # the best config
```

* Example for using NNI: An example of using BlendSearch with NNI can be seen in [test](https://github.com/microsoft/FLAML/tree/main/test/nni). CFO can be used as well in a similar manner. To run the example, first make sure you have [NNI](https://nni.readthedocs.io/en/stable/) installed, then run:
- Example for using NNI: An example of using BlendSearch with NNI can be seen in [test](https://github.com/microsoft/FLAML/tree/main/test/nni). CFO can be used as well in a similar manner. To run the example, first make sure you have [NNI](https://nni.readthedocs.io/en/stable/) installed, then run:

```shell
$ nnictl create --config ./config.yml
```

* For more examples, please check out
[notebooks](https://github.com/microsoft/FLAML/tree/main/notebook/).
- For more examples, please check out
  [notebooks](https://github.com/microsoft/FLAML/tree/main/notebook/).

`flaml` offers two HPO methods: CFO and BlendSearch.
`flaml.tune` uses BlendSearch by default.

@@ -185,16 +190,16 @@ tune.run(...
)
```

* Recommended scenario: cost-related hyperparameters exist, a low-cost
initial point is known, and the search space is complex such that local search
is prone to be stuck at local optima.
- Recommended scenario: cost-related hyperparameters exist, a low-cost
  initial point is known, and the search space is complex such that local search
  is prone to be stuck at local optima.

* Suggestion about using larger search space in BlendSearch:
In hyperparameter optimization, a larger search space is desirable because it is more likely to include the optimal configuration (or one of the optimal configurations) in hindsight. However, the performance (especially anytime performance) of most existing HPO methods is undesirable if the cost of the configurations in the search space has a large variation. Thus hand-crafted small search spaces (with relatively homogeneous cost) are often used in practice for these methods, which is subject to idiosyncrasy. BlendSearch combines the benefits of local search and global search, which enables a smart (economical) way of deciding where to explore in the search space even though it is larger than necessary. This allows users to specify a larger search space in BlendSearch, which is often easier and a better practice than narrowing down the search space by hand.
- Suggestion about using larger search space in BlendSearch:
  In hyperparameter optimization, a larger search space is desirable because it is more likely to include the optimal configuration (or one of the optimal configurations) in hindsight. However, the performance (especially anytime performance) of most existing HPO methods is undesirable if the cost of the configurations in the search space has a large variation. Thus hand-crafted small search spaces (with relatively homogeneous cost) are often used in practice for these methods, which is subject to idiosyncrasy. BlendSearch combines the benefits of local search and global search, which enables a smart (economical) way of deciding where to explore in the search space even though it is larger than necessary. This allows users to specify a larger search space in BlendSearch, which is often easier and a better practice than narrowing down the search space by hand.

For more technical details, please check our papers.

* [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.
- [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.

```bibtex
@inproceedings{wu2021cfo,

@@ -205,7 +210,7 @@ For more technical details, please check our papers.
}
```

* [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.
- [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.

```bibtex
@inproceedings{wang2021blendsearch,
@@ -3,16 +3,16 @@ try:

    assert ray_version >= "1.10.0"
    from ray.tune import (
        uniform,
        lograndint,
        loguniform,
        qlograndint,
        qloguniform,
        qrandint,
        qrandn,
        quniform,
        randint,
        qrandint,
        randn,
        qrandn,
        loguniform,
        qloguniform,
        lograndint,
        qlograndint,
        uniform,
    )

    if ray_version.startswith("1."):

@@ -20,21 +20,20 @@ try:
    else:
        from ray.tune.search import sample
except (ImportError, AssertionError):
    from . import sample
    from .sample import (
        uniform,
        lograndint,
        loguniform,
        qlograndint,
        qloguniform,
        qrandint,
        qrandn,
        quniform,
        randint,
        qrandint,
        randn,
        qrandn,
        loguniform,
        qloguniform,
        lograndint,
        qlograndint,
        uniform,
    )
    from . import sample
from .tune import run, report, INCUMBENT_RESULT
from .sample import polynomial_expansion_set
from .sample import PolynomialExpansionSet, Categorical, Float
from .sample import Categorical, Float, PolynomialExpansionSet, polynomial_expansion_set
from .trial import Trial
from .tune import INCUMBENT_RESULT, report, run
from .utils import choice

@@ -15,10 +15,12 @@
# This source file is adapted here because ray does not fully support Windows.

# Copyright (c) Microsoft Corporation.
from typing import Dict, Optional
import numpy as np
from .trial import Trial
import logging
from typing import Dict, Optional

import numpy as np

from .trial import Trial

logger = logging.getLogger(__name__)

@@ -19,6 +19,7 @@ import logging
from copy import copy
from math import isclose
from typing import Any, Dict, List, Optional, Sequence, Union

import numpy as np

# Backwards compatibility

@@ -1,6 +1,6 @@
from .trial_scheduler import TrialScheduler
from .online_scheduler import (
    ChaChaScheduler,
    OnlineScheduler,
    OnlineSuccessiveDoublingScheduler,
    ChaChaScheduler,
)
from .trial_scheduler import TrialScheduler

@@ -1,9 +1,12 @@
import numpy as np
import logging
from typing import Dict
from flaml.tune.scheduler import TrialScheduler

import numpy as np

from flaml.tune import Trial

from .trial_scheduler import TrialScheduler

logger = logging.getLogger(__name__)

@@ -2,10 +2,11 @@
# * Copyright (c) Microsoft Corporation. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from typing import Dict, Optional, List, Tuple, Callable, Union
import numpy as np
import time
import pickle
import time
from typing import Callable, Dict, List, Optional, Tuple, Union

import numpy as np

try:
    from ray import __version__ as ray_version

@@ -18,17 +19,17 @@ try:
    from ray.tune.search import Searcher
    from ray.tune.search.optuna import OptunaSearch as GlobalSearch
except (ImportError, AssertionError):
    from .suggestion import Searcher
    from .suggestion import OptunaSearch as GlobalSearch
from ..trial import unflatten_dict, flatten_dict
from .. import INCUMBENT_RESULT
from .search_thread import SearchThread
from .flow2 import FLOW2
from ..space import add_cost_to_space, indexof, normalize, define_by_run_func
from ..result import TIME_TOTAL_S

    from .suggestion import Searcher
import logging

from .. import INCUMBENT_RESULT
from ..result import TIME_TOTAL_S
from ..space import add_cost_to_space, define_by_run_func, indexof, normalize
from ..trial import flatten_dict, unflatten_dict
from .flow2 import FLOW2
from .search_thread import SearchThread

SEARCH_THREAD_EPS = 1.0
PENALTY = 1e10  # penalty term for constraints
logger = logging.getLogger(__name__)

@@ -931,27 +932,27 @@ try:

    assert ray_version >= "1.10.0"
    from ray.tune import (
        uniform,
        quniform,
        choice,
        randint,
        qrandint,
        randn,
        qrandn,
        loguniform,
        qloguniform,
        qrandint,
        qrandn,
        quniform,
        randint,
        randn,
        uniform,
    )
except (ImportError, AssertionError):
    from ..sample import (
        uniform,
        quniform,
        choice,
        randint,
        qrandint,
        randn,
        qrandn,
        loguniform,
        qloguniform,
        qrandint,
        qrandn,
        quniform,
        randint,
        randn,
        uniform,
    )

try:

@@ -978,7 +979,7 @@ class BlendSearchTuner(BlendSearch, NNITuner):
        result = {
            "config": parameters,
            self._metric: extract_scalar_reward(value),
            self.cost_attr: 1 if isinstance(value, float) else value.get(self.cost_attr, value.get("sequence", 1))
            self.cost_attr: 1 if isinstance(value, float) else value.get(self.cost_attr, value.get("sequence", 1)),
            # if nni does not report training cost,
            # using sequence as an approximation.
            # if no sequence, using a constant 1

@@ -2,8 +2,8 @@
# * Copyright (c) Microsoft Corporation. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from .flow2 import FLOW2
from .blendsearch import CFO
from .flow2 import FLOW2


class FLOW2Cat(FLOW2):

@@ -2,31 +2,34 @@
# * Copyright (c) Microsoft Corporation. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from typing import Dict, Optional, Tuple
import numpy as np
import logging
from collections import defaultdict
from typing import Dict, Optional, Tuple

import numpy as np

try:
    from ray import __version__ as ray_version

    assert ray_version >= "1.0.0"
    if ray_version.startswith("1."):
        from ray.tune.suggest import Searcher
        from ray.tune import sample
        from ray.tune.suggest import Searcher
    else:
        from ray.tune.search import Searcher, sample
    from ray.tune.utils.util import flatten_dict, unflatten_dict
except (ImportError, AssertionError):
    from .suggestion import Searcher
    from flaml.tune import sample

    from ..trial import flatten_dict, unflatten_dict
    from .suggestion import Searcher
from flaml.config import SAMPLE_MULTIPLY_FACTOR

from ..space import (
    complete_config,
    denormalize,
    normalize,
    generate_variants_compatible,
    normalize,
)

logger = logging.getLogger(__name__)

@@ -135,7 +138,7 @@ class FLOW2(Searcher):
        self.max_resource = max_resource
        self._resource = None
        self._f_best = None  # only used for lexico_compare; it represents the best value achieved by lexico_flow.
        self._step_lb = np.Inf
        self._step_lb = np.inf
        self._histories = None  # only used for lexico_compare; it records the results of historical configurations.
        if space is not None:
            self._init_search()
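
The `np.Inf` → `np.inf` change above matters because NumPy 2.0 removed the capitalized aliases (NEP 52); `np.inf` is the spelling that works on both 1.x and 2.x:

```python
# Hedged sketch: np.inf is the portable spelling; np.Inf raises
# AttributeError under NumPy 2.x.
import numpy as np

step_lb = np.inf
assert step_lb == float("inf")
```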

@@ -1,9 +1,11 @@
import numpy as np
import logging
import itertools
from typing import Dict, Optional, List
from flaml.tune import Categorical, Float, PolynomialExpansionSet, Trial
import logging
from typing import Dict, List, Optional

import numpy as np

from flaml.onlineml import VowpalWabbitTrial
from flaml.tune import Categorical, Float, PolynomialExpansionSet, Trial
from flaml.tune.searcher import CFO

logger = logging.getLogger(__name__)

@@ -64,7 +66,7 @@ class ChampionFrontierSearcher(BaseSearcher):
    POLY_EXPANSION_ADDITION_NUM = 1
    # the order of polynomial expansions to add based on the given seed interactions
    EXPANSION_ORDER = 2
    # the number of new challengers with new numerical hyperparamter configs
    # the number of new challengers with new numerical hyperparameter configs
    NUMERICAL_NUM = 2

    # In order to use CFO, a loss name and loss values of configs are needed

@@ -78,7 +80,7 @@ class ChampionFrontierSearcher(BaseSearcher):
    CFO_SEARCHER_METRIC_NAME = "pseudo_loss"
    CFO_SEARCHER_LARGE_LOSS = 1e6

    # the random seed used in generating numerical hyperparamter configs (when CFO is not used)
    # the random seed used in generating numerical hyperparameter configs (when CFO is not used)
    NUM_RANDOM_SEED = 111

    CHAMPION_TRIAL_NAME = "champion_trial"

@@ -3,6 +3,7 @@
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from typing import Dict, Optional

import numpy as np

try:

@@ -15,11 +16,12 @@ try:
    from ray.tune.search import Searcher
except (ImportError, AssertionError):
    from .suggestion import Searcher
from .flow2 import FLOW2
from ..space import add_cost_to_space, unflatten_hierarchical
from ..result import TIME_TOTAL_S
import logging

from ..result import TIME_TOTAL_S
from ..space import add_cost_to_space, unflatten_hierarchical
from .flow2 import FLOW2

logger = logging.getLogger(__name__)

@@ -15,15 +15,17 @@
# This source file is adapted here because ray does not fully support Windows.

# Copyright (c) Microsoft Corporation.
import time
import functools
import warnings
import copy
import numpy as np
import functools
import logging
from typing import Any, Dict, Optional, Union, List, Tuple, Callable
import pickle
from .variant_generator import parse_spec_vars
import time
import warnings
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from ..sample import (
    Categorical,
    Domain,

@@ -34,7 +36,7 @@ from ..sample import (
    Uniform,
)
from ..trial import flatten_dict, unflatten_dict
from collections import defaultdict
from .variant_generator import parse_spec_vars

logger = logging.getLogger(__name__)

@@ -183,7 +185,7 @@ class ConcurrencyLimiter(Searcher):
    """

    def __init__(self, searcher: Searcher, max_concurrent: int, batch: bool = False):
        assert type(max_concurrent) is int and max_concurrent > 0
        assert isinstance(max_concurrent, int) and max_concurrent > 0
        self.searcher = searcher
        self.max_concurrent = max_concurrent
        self.batch = batch
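
The `isinstance` change above is the usual fix for an over-strict type check: `type(x) is int` rejects int subclasses that are perfectly valid limits. A quick illustration (not part of the diff):

```python
# Hedged sketch: an IntEnum member is an int subclass, so the old check
# rejects it while the new one accepts it.
import enum


class Limit(enum.IntEnum):
    DEFAULT = 4


print(type(Limit.DEFAULT) is int)      # False: old check rejects it
print(isinstance(Limit.DEFAULT, int))  # True: new check accepts it
```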

@@ -252,8 +254,8 @@ try:
    import optuna as ot
    from optuna.distributions import BaseDistribution as OptunaDistribution
    from optuna.samplers import BaseSampler
    from optuna.trial import TrialState as OptunaTrialState
    from optuna.trial import Trial as OptunaTrial
    from optuna.trial import TrialState as OptunaTrialState
except ImportError:
    ot = None
    OptunaDistribution = None

@@ -559,7 +561,15 @@ class OptunaSearch(Searcher):
        self._space = space

        self._points_to_evaluate = points_to_evaluate or []
        self._evaluated_rewards = evaluated_rewards
        # rewards should be a list of floats, not a dict
        # After Optuna > 3.5.0, there is a check for NaN in the list "any(math.isnan(x) for x in self._values)"
        # which will raise an error when encountering a dict
        if evaluated_rewards is not None:
            self._evaluated_rewards = [
                list(item.values())[0] if isinstance(item, dict) else item for item in evaluated_rewards
            ]
        else:
            self._evaluated_rewards = evaluated_rewards

        self._study_name = "optuna"  # Fixed study name for in-memory storage
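
The branch added above normalizes `evaluated_rewards` because newer Optuna versions run `math.isnan` over every reported value, which fails on dicts. The same normalization in isolation (illustration only; the sample values are made up):

```python
# Hedged sketch: unwrap single-entry dict rewards into plain floats so
# Optuna's NaN check does not choke on them.
evaluated_rewards = [0.81, {"accuracy": 0.92}, 0.77]
normalized = [
    list(item.values())[0] if isinstance(item, dict) else item
    for item in evaluated_rewards
]
print(normalized)  # [0.81, 0.92, 0.77]
```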

@@ -871,9 +881,9 @@ class OptunaSearch(Searcher):

        elif isinstance(domain, Integer):
            if isinstance(sampler, LogUniform):
                return ot.distributions.IntLogUniformDistribution(
                    domain.lower, domain.upper - 1, step=quantize or 1
                )
                # ``step`` argument Deprecated in v2.0.0. ``step`` argument should be 1 in Log Distribution
                # The removal of this feature is currently scheduled for v4.0.0,
                return ot.distributions.IntLogUniformDistribution(domain.lower, domain.upper - 1, step=1)
        elif isinstance(sampler, Uniform):
            # Upper bound should be inclusive for quantization and
            # exclusive otherwise

@@ -17,9 +17,11 @@
# Copyright (c) Microsoft Corporation.
import copy
import logging
from typing import Any, Dict, Generator, List, Tuple
import numpy
import random
from typing import Any, Dict, Generator, List, Tuple

import numpy

from ..sample import Categorical, Domain, RandomState

try:

@@ -11,9 +11,10 @@ try:
except (ImportError, AssertionError):
    from . import sample
    from .searcher.variant_generator import generate_variants
from typing import Dict, Optional, Any, Tuple, Generator, List, Union
import numpy as np
import logging
from typing import Any, Dict, Generator, List, Optional, Tuple, Union

import numpy as np

logger = logging.getLogger(__name__)

@@ -489,7 +490,7 @@ def complete_config(
    elif domain.bounded:
        up, low, gauss_std = 1, 0, 1.0
    else:
        up, low, gauss_std = np.Inf, -np.Inf, 1.0
        up, low, gauss_std = np.inf, -np.inf, 1.0
    if domain.bounded:
        if isinstance(up, list):
            up[-1] = min(up[-1], 1)

@@ -1,8 +1,8 @@
from flaml.tune.spark.utils import (
    broadcast_code,
    check_spark,
    get_n_cpus,
    with_parameters,
    broadcast_code,
)

__all__ = ["check_spark", "get_n_cpus", "with_parameters", "broadcast_code"]

@@ -5,7 +5,6 @@ import threading
import time
from functools import lru_cache, partial


logger = logging.getLogger(__name__)
logger_formatter = logging.Formatter(
    "[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s", "%m-%d %H:%M:%S"

@@ -13,10 +12,10 @@ logger_formatter = logging.Formatter(
logger.propagate = False
os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
try:
    import py4j
    import pyspark
    from pyspark.sql import SparkSession
    from pyspark.util import VersionUtils
    import py4j
except ImportError:
    _have_spark = False
    py4j = None

@@ -286,6 +285,7 @@ class PySparkOvertimeMonitor:
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Exit the context manager.
        This will wait for the monitor thread to nicely exit."""
        logger.debug(f"monitor exited: {exc_type}, {exc_value}, {exc_traceback}")
        if self._force_cancel and _have_spark:
            self._finished_flag = True
            self._monitor_daemon.join()

@@ -296,6 +296,11 @@ class PySparkOvertimeMonitor:
            if not exc_type:
                return True
            elif exc_type == py4j.protocol.Py4JJavaError:
                logger.debug("Py4JJavaError Exception: %s", exc_value)
                return True
            elif exc_type == TypeError:
                # When force cancel, joblib>1.2.0 will raise joblib.externals.loky.process_executor._ExceptionWithTraceback
                logger.debug("TypeError Exception: %s", exc_value)
                return True
            else:
                return False
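
For context on the `__exit__` logic above: returning `True` from a context manager's `__exit__` suppresses the active exception, so each whitelisted exception type short-circuits to `True` while anything else propagates. A generic sketch (illustration only, not FLAML code):

```python
# Hedged sketch: a context manager that swallows only selected
# exception types by returning True from __exit__.
class SwallowSelected:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # True for no exception or a whitelisted type; False propagates
        return exc_type in (None, ValueError, TypeError)


with SwallowSelected():
    raise ValueError("cancelled mid-flight")  # suppressed
print("still running")
```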

@@ -15,10 +15,10 @@
# This source file is adapted here because ray does not fully support Windows.

# Copyright (c) Microsoft Corporation.
import uuid
import time
from numbers import Number
import uuid
from collections import deque
from numbers import Number


def flatten_dict(dt, delimiter="/", prevent_delimiter=False):

@@ -2,6 +2,7 @@
# * Copyright (c) Microsoft Corporation. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
import logging
from typing import Optional

# try:

@@ -10,7 +11,6 @@ from typing import Optional
# from ray.tune.trial import Trial
# except (ImportError, AssertionError):
from .trial import Trial
import logging

logger = logging.getLogger(__name__)

@@ -2,13 +2,14 @@
# * Copyright (c) FLAML authors. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from typing import Optional, Union, List, Callable, Tuple, Dict
import numpy as np
import datetime
import time
import os
import sys
import time
from collections import defaultdict
from typing import Callable, Dict, List, Optional, Tuple, Union

import numpy as np

try:
    from ray import __version__ as ray_version

@@ -21,11 +22,13 @@ except (ImportError, AssertionError):
else:
    ray_available = True

from .trial import Trial
from .result import DEFAULT_METRIC
import logging

from flaml.tune.spark.utils import PySparkOvertimeMonitor, check_spark

from .result import DEFAULT_METRIC
from .trial import Trial

logger = logging.getLogger(__name__)
logger.propagate = False
_use_ray = True

@@ -92,10 +95,12 @@ class ExperimentAnalysis(EA):
                feasible_index_filter = np.where(
                    feasible_value
                    <= max(
                        f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric]
                        if not isinstance(self.lexico_objectives["tolerances"][k_metric], str)
                        else f_best[k_metric]
                        * (1 + 0.01 * float(self.lexico_objectives["tolerances"][k_metric].replace("%", ""))),
                        (
                            f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric]
                            if not isinstance(self.lexico_objectives["tolerances"][k_metric], str)
                            else f_best[k_metric]
                            * (1 + 0.01 * float(self.lexico_objectives["tolerances"][k_metric].replace("%", "")))
                        ),
                        k_target,
                    )
                )[0]
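
The parenthesization fix above wraps the conditional expression that turns a percentage tolerance such as `"5%"` into an absolute bound. The arithmetic in isolation (illustration only; the numbers are made up):

```python
# Hedged sketch: how a relative tolerance string becomes the absolute
# bound used by the feasibility filter.
f_best = 2.0       # best value seen for this metric
tolerance = "5%"   # relative tolerance, given as a string

if isinstance(tolerance, str):
    bound = f_best * (1 + 0.01 * float(tolerance.replace("%", "")))
else:
    bound = f_best + tolerance
print(bound)  # 2.1
```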

@@ -481,7 +486,7 @@ def run(
    else:
        logger.setLevel(logging.CRITICAL)

    from .searcher.blendsearch import BlendSearch, CFO, RandomSearch
    from .searcher.blendsearch import CFO, BlendSearch, RandomSearch

    if lexico_objectives is not None:
        if "modes" not in lexico_objectives.keys():

@@ -650,12 +655,13 @@ def run(
        if not spark_available:
            raise spark_error_msg
        try:
            from pyspark.sql import SparkSession
            from joblib import Parallel, delayed, parallel_backend
            from joblibspark import register_spark
            from pyspark.sql import SparkSession
        except ImportError as e:
            raise ImportError(f"{e}. Try pip install flaml[spark] or set use_spark=False.")
        from flaml.tune.searcher.suggestion import ConcurrencyLimiter

        from .trial_runner import SparkTrialRunner

        register_spark()

@@ -1 +1 @@
__version__ = "2.0.1"
__version__ = "2.2.0"

@@ -15,7 +15,7 @@
    "source": [
    "# Auto Generated Agent Chat: Using MathChat to Solve Math Problems\n",
    "\n",
    "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
    "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
    "\n",
    "MathChat is an experimental conversational framework for math problem solving. In this notebook, we demonstrate how to use MathChat to solve math problems. MathChat uses the `AssistantAgent` and `MathUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `MathUserProxyAgent` implements a different auto reply mechanism corresponding to the MathChat prompts. You can find more details in the paper [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337) or the [blogpost](https://microsoft.github.io/FLAML/blog/2023/06/28/MathChat).\n",
    "\n",

@@ -9,7 +9,7 @@
    "# Auto Generated Agent Chat: Using RetrieveChat for Retrieve Augmented Code Generation and Question Answering\n",
    "\n",
    "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
    "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
    "Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
    "\n",
    "RetrieveChat is a conversational system for retrieve augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveAssistantAgent` and `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
    "\n",

@@ -2604,9 +2604,9 @@
    "        - if \"data:path\" use data-dependent defaults which are stored at path;\n",
    "        - if \"static\", use data-independent defaults.\n",
    "    If dict, keys are the name of the estimators, and values are the starting\n",
    "    hyperparamter configurations for the corresponding estimators.\n",
    "    The value can be a single hyperparamter configuration dict or a list\n",
    "    of hyperparamter configuration dicts.\n",
    "    hyperparameter configurations for the corresponding estimators.\n",
    "    The value can be a single hyperparameter configuration dict or a list\n",
    "    of hyperparameter configuration dicts.\n",
    "    In the following code example, we get starting_points from the\n",
    "    `automl` object and use them in the `new_automl` object.\n",
    "    e.g.,\n",
File diff suppressed because one or more lines are too long
@@ -16,7 +16,7 @@
|
||||
"# Auto Generated Agent Chat: Chess Game Playing While Chitchatting by GPT-4 Agents\n",
|
||||
"\n",
|
||||
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
|
||||
"\n",
|
||||
"This notebook is modified based on https://github.com/ekzhu/FLAML/blob/evaluation/evaluation/chess/play_chess.ipynb\n",
|
||||
"\n",
|
||||
@@ -35,7 +35,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"# %pip install flaml[autogen]~=2.0.0\n",
|
||||
"# %pip install flaml[autogen]~=2.1.0\n",
|
||||
"%pip install chess -U"
|
||||
]
|
||||
},
|
||||
@@ -79,6 +79,7 @@
|
||||
"# \"model\": {\n",
|
||||
"# \"gpt-3.5-turbo\",\n",
|
||||
"# \"gpt-3.5-turbo-16k\",\n",
|
||||
"# \"gpt-3.5-turbo-16k-0613\",\n",
|
||||
"# \"gpt-3.5-turbo-0301\",\n",
|
||||
"# \"chatgpt-35-turbo-0301\",\n",
|
||||
"# \"gpt-35-turbo-v0301\",\n",
|
||||
@@ -157,7 +158,7 @@
|
||||
" llm_config={\"temperature\": 0.0, \"config_list\": config_list_gpt4},\n",
|
||||
" max_consecutive_auto_reply=10,\n",
|
||||
" )\n",
|
||||
" self.register_auto_reply(autogen.ResponsiveAgent, BoardAgent._generate_board_reply)\n",
|
||||
" self.register_reply(autogen.ConversableAgent, BoardAgent._generate_board_reply)\n",
|
||||
" self.board = board\n",
|
||||
" self.correct_move_messages = defaultdict(list)\n",
|
||||
"\n",
|
||||
@@ -225,8 +226,8 @@
|
||||
" max_consecutive_auto_reply=max_turns,\n",
|
||||
" **kwargs,\n",
|
||||
" )\n",
|
||||
" self.register_auto_reply(BoardAgent, ChessPlayerAgent._generate_reply_for_board, config=board_agent.board)\n",
|
||||
" self.register_auto_reply(ChessPlayerAgent, ChessPlayerAgent._generate_reply_for_player, config=board_agent)\n",
|
||||
" self.register_reply(BoardAgent, ChessPlayerAgent._generate_reply_for_board, config=board_agent.board)\n",
|
||||
" self.register_reply(ChessPlayerAgent, ChessPlayerAgent._generate_reply_for_player, config=board_agent)\n",
|
||||
" self.update_max_consecutive_auto_reply(board_agent.max_consecutive_auto_reply(), board_agent)\n",
|
||||
"\n",
|
||||
" def _generate_reply_for_board(\n",
|
||||
@@ -261,7 +262,7 @@
|
||||
" return True, None\n",
|
||||
" # converse with the board until a legal move is made or max allowed retries.\n",
|
||||
" # change silent to False to see that conversation.\n",
|
||||
" self.initiate_chat(board_agent, clear_history=False, message=message, silent=True)\n",
|
||||
" self.initiate_chat(board_agent, clear_history=False, message=message, silent=self.human_input_mode == \"NEVER\")\n",
|
||||
" # last message sent by the board agent\n",
|
||||
" last_message = self._oai_messages[board_agent][-1]\n",
|
||||
" if last_message[\"role\"] == \"assistant\":\n",
|
||||
@@ -1009,7 +1010,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.9.17"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n",
|
||||
"\n",
|
||||
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
|
||||
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
|
||||
"\n",
|
||||
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs need to be passed to `AssistantAgent` to initialize the agent. The corresponding functions need to be passed to `UserProxyAgent`, which will be responsible for executing any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to make sure the instructions align with the function call descriptions.\n",
|
||||
"\n",
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"# Auto Generated Agent Chat: Group Chat\n",
|
||||
"\n",
|
||||
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
|
||||
"\n",
|
||||
"This notebook is modified based on https://github.com/microsoft/FLAML/blob/4ea686af5c3e8ff24d9076a7a626c8b28ab5b1d7/notebook/autogen_multiagent_roleplay_chat.ipynb\n",
|
||||
"\n",
|
||||
@@ -30,12 +30,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 105,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"# %pip install flaml[autogen]~=2.0.1"
|
||||
"# %pip install flaml[autogen]~=2.0.2"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,7 +50,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 106,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -122,28 +122,27 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 107,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_config = {\"config_list\": config_list_gpt4}\n",
|
||||
"human = autogen.UserProxyAgent(\n",
|
||||
" name=\"Human\",\n",
|
||||
"llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42}\n",
|
||||
"user_proxy = autogen.UserProxyAgent(\n",
|
||||
" name=\"User_proxy\",\n",
|
||||
" system_message=\"A human admin.\",\n",
|
||||
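"    # execute code found in the last 2 messages, inside the \"groupchat\" working directory\n",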
" code_execution_config={\"last_n_messages\": 2, \"work_dir\": \"groupchat\"},\n",
|
||||
" human_input_mode=\"TERMINATE\"\n",
|
||||
")\n",
|
||||
"alice = autogen.AssistantAgent(\n",
|
||||
" name=\"Alice\",\n",
|
||||
"coder = autogen.AssistantAgent(\n",
|
||||
" name=\"Coder\",\n",
|
||||
" llm_config=llm_config,\n",
|
||||
")\n",
|
||||
"bob = autogen.AssistantAgent(\n",
|
||||
" name=\"Bob\",\n",
|
||||
" system_message=\"Code and answer reviewer.\"\n",
|
||||
" \"For code, prevent code execution if unsafe or missing important details, e.g., sort order in arxiv API. Suggest changes. Otherwise, approve and return the final code to execute.\"\n",
|
||||
" \"For answer, carefully check the interpretation of code result and fix any errors. If the interpretation is correct, approve and return the final answer to the user.\",\n",
|
||||
"pm = autogen.AssistantAgent(\n",
|
||||
" name=\"Product_manager\",\n",
|
||||
" system_message=\"Creative in software product ideas.\",\n",
|
||||
" llm_config=llm_config,\n",
|
||||
")\n",
|
||||
"groupchat = autogen.GroupChat(agents=[human, alice, bob], messages=[], max_round=12)\n",
|
||||
"groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n",
|
||||
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)"
|
||||
]
|
||||
},
|
||||
@@ -157,139 +156,112 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 108,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mHuman\u001b[0m (to chat_manager):\n",
|
||||
"\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Find a latest paper about gpt-4\n",
|
||||
"Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mAlice\u001b[0m (to chat_manager):\n",
|
||||
"\u001b[33mCoder\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"To find the latest papers about gpt-4, we can use APIs of scholarly databases like Google Scholar, PubMed, Arxiv, etc., or automation tools like Beautiful Soup to scrape the information from webpages. However, a significant number of these methods are in violation of the terms of service of these platforms. Therefore, it's recommended to manually search these databases. \n",
|
||||
"\n",
|
||||
"But in our case, we'll use the arXiv API, which is a freely accessible database. It holds a vast collection of articles in the field of computer science and many other disciplines. It is often used by researchers to share their papers before they are published, so it could include articles about GPT-4. \n",
|
||||
"\n",
|
||||
"The following Python code uses the requests and feedparser libraries to do this. Requests is used to make a GET request to the arXiv API with the search query as a parameter. Feedparser is used to parse the returned RSS feed. The result is the information of the most recent articles based on publication time related to gpt-4 from arXiv. \n",
|
||||
"To find the latest paper about GPT-4 on arxiv, I'll provide you with a Python code that fetches the most recent papers from the arxiv API and filters the results to get the most relevant paper related to GPT-4. After fetching the paper, I'll extract the information for potential applications in software. Please execute the following Python code:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# python code\n",
|
||||
"import requests\n",
|
||||
"import feedparser\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"import re\n",
|
||||
"\n",
|
||||
"# Search arXiv API for papers related to gpt-4\n",
|
||||
"url = 'http://export.arxiv.org/api/query'\n",
|
||||
"params = {'search_query': 'all:gpt-4', 'sortBy': 'submittedDate', 'sortOrder': 'descending'}\n",
|
||||
"response = requests.get(url, params=params)\n",
|
||||
"def fetch_arxiv_papers(query):\n",
|
||||
" base_url = \"http://export.arxiv.org/api/query?\"\n",
|
||||
" search_query = \"all:\" + query\n",
|
||||
" response = requests.get(base_url, params={\"search_query\": search_query, \"sortBy\": \"submittedDate\", \"sortOrder\": \"descending\"})\n",
|
||||
" return BeautifulSoup(response.content, \"xml\")\n",
|
||||
"\n",
|
||||
"# Parse the response\n",
|
||||
"feeds = feedparser.parse(response.content)\n",
|
||||
"def find_gpt4_paper():\n",
|
||||
" papers = fetch_arxiv_papers(\"gpt-4\")\n",
|
||||
" for entry in papers.find_all(\"entry\"):\n",
|
||||
" title = entry.title.text.strip()\n",
|
||||
" summary = entry.summary.text.strip()\n",
|
||||
" if \"gpt-4\" in title.lower() or \"gpt-4\" in summary.lower():\n",
|
||||
" return {\"title\": title, \"summary\": summary}\n",
|
||||
"\n",
|
||||
"# should check if feeds.entries is empty\n",
|
||||
"\n",
|
||||
"# Get the first paper's information\n",
|
||||
"latest_paper = feeds.entries[0]\n",
|
||||
"\n",
|
||||
"# Print the paper's title, authors, and published date\n",
|
||||
"print('Title: ', latest_paper['title'])\n",
|
||||
"print('Authors: ', latest_paper['author'])\n",
|
||||
"print('Published Date: ', latest_paper['published'])\n",
|
||||
"```\n",
|
||||
"You need to install requests and feedparser in your python environment, if they are not installed yet.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mBob\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"The provided code is mostly correct, but it is missing some important error handling measures. As mentioned in the comments, it does not check if the feed entries are empty. It also does not check if the request was successful before trying to parse the response.\n",
|
||||
"\n",
|
||||
"Additionally, the code does not handle any exceptions that may occur during the process, such adding try/except clause would make it more robust.\n",
|
||||
"\n",
|
||||
"Here's a suggestion on how to improve it:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# python code\n",
|
||||
"import requests\n",
|
||||
"import feedparser\n",
|
||||
"\n",
|
||||
"# Search arXiv API for papers related to gpt-4\n",
|
||||
"url = 'http://export.arxiv.org/api/query'\n",
|
||||
"params = {'search_query': 'all:gpt-4', 'sortBy': 'submittedDate', 'sortOrder': 'descending'}\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" response = requests.get(url, params=params)\n",
|
||||
" response.raise_for_status()\n",
|
||||
"except requests.HTTPError as http_err:\n",
|
||||
" print(f'HTTP error occurred: {http_err}')\n",
|
||||
"except requests.ConnectionError as conn_err:\n",
|
||||
" print(f'Error connecting: {conn_err}')\n",
|
||||
"except requests.Timeout as to_err:\n",
|
||||
" print(f'Timeout error: {to_err}')\n",
|
||||
"except requests.RequestException as err:\n",
|
||||
" print(f'An error occurred: {err}')\n",
|
||||
"gpt4_paper = find_gpt4_paper()\n",
|
||||
"if gpt4_paper:\n",
|
||||
" print(\"Title:\", gpt4_paper[\"title\"])\n",
|
||||
" print(\"Summary:\", gpt4_paper[\"summary\"])\n",
|
||||
"else:\n",
|
||||
" # Parse the response\n",
|
||||
" feeds = feedparser.parse(response.content)\n",
|
||||
"\n",
|
||||
" # Check if feeds.entries is empty\n",
|
||||
" if not feeds.entries:\n",
|
||||
" print(\"No results found.\")\n",
|
||||
" else: \n",
|
||||
" # Get the first paper's information\n",
|
||||
" latest_paper = feeds.entries[0]\n",
|
||||
"\n",
|
||||
" # Print the paper's title, authors, and published date\n",
|
||||
" print('Title: ', latest_paper['title'])\n",
|
||||
" print('Authors: ', latest_paper['author'])\n",
|
||||
" print('Published Date: ', latest_paper['published'])\n",
|
||||
" print(\"No recent GPT-4 papers found.\")\n",
|
||||
"```\n",
|
||||
"This version of the script will handle the HTTP, Connection, and Timeout errors as well as any other request error. It also checks that there are results before trying to print.\n",
|
||||
"\n",
|
||||
"Once we have the paper details, I'll analyze the summary to identify potential applications in software development.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001b[33mHuman\u001b[0m (to chat_manager):\n",
|
||||
"\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"exitcode: 0 (execution succeeded)\n",
|
||||
"Code output: \n",
|
||||
"Title: Language as Reality: A Co-Creative Storytelling Game Experience in 1001\n",
|
||||
" Nights using Generative AI\n",
|
||||
"Authors: Ali Asadipour\n",
|
||||
"Published Date: 2023-08-24T16:42:23Z\n",
|
||||
"Title: FIMO: A Challenge Formal Dataset for Automated Theorem Proving\n",
|
||||
"Summary: We present FIMO, an innovative dataset comprising formal mathematical problem\n",
|
||||
"statements sourced from the International Mathematical Olympiad (IMO)\n",
|
||||
"Shortlisted Problems. Designed to facilitate advanced automated theorem proving\n",
|
||||
"at the IMO level, FIMO is currently tailored for the Lean formal language. It\n",
|
||||
"comprises 149 formal problem statements, accompanied by both informal problem\n",
|
||||
"descriptions and their corresponding LaTeX-based informal proofs. Through\n",
|
||||
"initial experiments involving GPT-4, our findings underscore the existing\n",
|
||||
"limitations in current methodologies, indicating a substantial journey ahead\n",
|
||||
"before achieving satisfactory IMO-level automated theorem proving outcomes.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mAlice\u001b[0m (to chat_manager):\n",
|
||||
"\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Based on the output of the code, the latest paper on GPT-4 found was \"Language as Reality: A Co-Creative Storytelling Game Experience in 1001 Nights using Generative AI\" by Ali Asadipour, published on 24th August 2023. \n",
|
||||
"Based on the paper titled \"FIMO: A Challenge Formal Dataset for Automated Theorem Proving\" and its summary, the potential applications of GPT-4 in software development can be related to the field of automated theorem proving.\n",
|
||||
"\n",
|
||||
"This shows the latest found document related to the search term \"GPT-4\" in the arXiv scholarly paper database. Please note that the search query includes all parts of the articles: the title, the abstract, and the full text if available, so it might not be necessarily about GPT-4 but includes the term in some section of the paper.\n",
|
||||
"1. **Automated theorem proving**: GPT-4 can be utilized in the development of automated theorem proving software that attempts to prove complex mathematical problems taken from International Mathematical Olympiad (IMO) or other challenging sources. By fine-tuning GPT-4 with a dataset like FIMO consisting of formal mathematical problems, the model can potentially better understand the problem statements and generate appropriate proofs.\n",
|
||||
"\n",
|
||||
"2. **Mathematical problem-solving assistants**: Software tools can be developed using GPT-4 to guide users in solving complex mathematical problems. The AI model can be integrated into educational platforms, online math tutoring services, or even standalone tools to help make solving problems easier and faster for students and professionals alike.\n",
|
||||
"\n",
|
||||
"3. **Formal language translation**: GPT-4 can potentially be integrated into software for translating between formal languages, assisting in the understanding and comparison of various formal systems. This would be especially useful in research communities employing different formal languages and wanting to share ideas and results.\n",
|
||||
"\n",
|
||||
"4. **Mathematical proof checking**: GPT-4 can be employed in proof-checking software to identify and correct inconsistencies. By improving the correctness of proofs, this application would ultimately help users save time and contribute to the overall quality of mathematical research.\n",
|
||||
"\n",
|
||||
"Please note that this paper highlights the current limitations of GPT-4 in the context of IMO-level theorem proving. Nevertheless, these potential applications suggest directions for further research and software development as the model and related techniques continue to improve.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
|
||||
"\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
|
||||
"\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mCoder\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"TERMINATE\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mBob\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"The interpretation of the code output is correct. The latest document retrieved related to \"GPT-4\" is indeed \"Language as Reality: A Co-Creative Storytelling Game Experience in 1001 Nights using Generative AI\" by Ali Asadipour, published on 24th August 2023.\n",
|
||||
"\n",
|
||||
"Make sure to understand that the document might not be solely about GPT-4 as the search used covered all aspects of the document. The term \"GPT-4\" could be mentioned anywhere in the paper.\n",
|
||||
"\n",
|
||||
"APPROVED\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"human.initiate_chat(manager, message=\"Find a latest paper about gpt-4\")\n",
|
||||
"user_proxy.initiate_chat(manager, message=\"Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\")\n",
|
||||
"# type exit to terminate the chat"
|
||||
]
|
||||
}
|
||||
|
||||
566
notebook/autogen_agentchat_groupchat_research.ipynb
Normal file
@@ -0,0 +1,566 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_agentchat_groupchat_research.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Auto Generated Agent Chat: Performs Research with Multi-Agent Group Chat\n",
|
||||
"\n",
|
||||
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
|
||||
"\n",
|
||||
"## Requirements\n",
|
||||
"\n",
|
||||
"FLAML requires `Python>=3.8`. To run this notebook example, please install flaml with the [autogen] option:\n",
|
||||
"```bash\n",
|
||||
"pip install flaml[autogen]\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"# %pip install flaml[autogen]~=2.0.3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set your API Endpoint\n",
|
||||
"\n",
|
||||
"The [`config_list_from_json`](https://microsoft.github.io/FLAML/docs/reference/autogen/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from flaml import autogen\n",
|
||||
"\n",
|
||||
"config_list_gpt4 = autogen.config_list_from_json(\n",
|
||||
" \"OAI_CONFIG_LIST\",\n",
|
||||
" filter_dict={\n",
|
||||
" \"model\": [\"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well). Only the gpt-4-32k models are kept in the list based on the filter condition.\n",
|
||||
"\n",
|
||||
"The config list looks like the following:\n",
|
||||
"```python\n",
|
||||
"config_list = [\n",
|
||||
" {\n",
|
||||
" 'model': 'gpt-4-32k',\n",
|
||||
" 'api_key': '<your OpenAI API key here>',\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" 'model': 'gpt-4-32k',\n",
|
||||
" 'api_key': '<your Azure OpenAI API key here>',\n",
|
||||
" 'api_base': '<your Azure OpenAI API base here>',\n",
|
||||
" 'api_type': 'azure',\n",
|
||||
" 'api_version': '2023-06-01-preview',\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" 'model': 'gpt-4-32k-0314',\n",
|
||||
" 'api_key': '<your Azure OpenAI API key here>',\n",
|
||||
" 'api_base': '<your Azure OpenAI API base here>',\n",
|
||||
" 'api_type': 'azure',\n",
|
||||
" 'api_version': '2023-06-01-preview',\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"If you open this notebook in colab, you can upload your files by clicking the file icon on the left panel and then choose \"upload file\" icon.\n",
|
||||
"\n",
|
||||
"You can set the value of config_list in other ways you prefer, e.g., loading from a YAML file."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Construct Agents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gpt4_config = {\n",
|
||||
" \"seed\": 42, # change the seed for different trials\n",
|
||||
" \"temperature\": 0,\n",
|
||||
" \"config_list\": config_list_gpt4,\n",
|
||||
" \"request_timeout\": 120,\n",
|
||||
"}\n",
|
||||
"user_proxy = autogen.UserProxyAgent(\n",
|
||||
" name=\"Admin\",\n",
|
||||
" system_message=\"A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.\",\n",
|
||||
" code_execution_config=False,\n",
|
||||
")\n",
|
||||
"engineer = autogen.AssistantAgent(\n",
|
||||
" name=\"Engineer\",\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
" system_message='''Engineer. You follow an approved plan. You write python/shell code to solve tasks. Wrap the code in a code block that specifies the script type. The user can't modify your code. So do not suggest incomplete code which requires others to modify. Don't use a code block if it's not intended to be executed by the executor.\n",
|
||||
"Don't include multiple code blocks in one response. Do not ask others to copy and paste the result. Check the execution result returned by the executor.\n",
|
||||
"If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\n",
|
||||
"''',\n",
|
||||
")\n",
|
||||
"scientist = autogen.AssistantAgent(\n",
|
||||
" name=\"Scientist\",\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
" system_message=\"\"\"Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. You don't write code.\"\"\"\n",
|
||||
")\n",
|
||||
"planner = autogen.AssistantAgent(\n",
|
||||
" name=\"Planner\",\n",
|
||||
" system_message='''Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval.\n",
|
||||
"The plan may involve an engineer who can write code and a scientist who doesn't write code.\n",
|
||||
"Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.\n",
|
||||
"''',\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
")\n",
|
||||
"executor = autogen.UserProxyAgent(\n",
|
||||
" name=\"Executor\",\n",
|
||||
" system_message=\"Executor. Execute the code written by the engineer and report the result.\",\n",
|
||||
" human_input_mode=\"NEVER\",\n",
|
||||
" code_execution_config={\"last_n_messages\": 3, \"work_dir\": \"paper\"},\n",
|
||||
")\n",
|
||||
"critic = autogen.AssistantAgent(\n",
|
||||
" name=\"Critic\",\n",
|
||||
" system_message=\"Critic. Double check plan, claims, code from other agents and provide feedback. Check whether the plan includes adding verifiable info such as source URL.\",\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
")\n",
|
||||
"groupchat = autogen.GroupChat(agents=[user_proxy, engineer, scientist, planner, executor, critic], messages=[], max_round=50)\n",
|
||||
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Start Chat"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mAdmin\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mPlanner\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Plan:\n",
|
||||
"\n",
|
||||
"1. Engineer: Write a script to scrape the arXiv website for papers related to LLM (Language Model) applications published in the last week. The script should extract the title, authors, abstract, and link to the paper.\n",
|
||||
"\n",
|
||||
"2. Scientist: Review the scraped data to identify the different domains in which LLM is applied. This could be based on keywords in the title or abstract, or the scientist's knowledge of the field.\n",
|
||||
"\n",
|
||||
"3. Engineer: Modify the script to categorize the papers based on the domains identified by the scientist. The script should output a markdown table with columns for the domain, title, authors, abstract, and link.\n",
|
||||
"\n",
|
||||
"4. Scientist: Review the markdown table to ensure the papers are correctly categorized and the information is accurate.\n",
|
||||
"\n",
|
||||
"5. Engineer: Make any necessary revisions to the script based on the scientist's feedback.\n",
|
||||
"\n",
|
||||
"6. Scientist: Give final approval of the markdown table.\n",
|
||||
"\n",
|
||||
"7. Engineer: Submit the final markdown table.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mCritic\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"The plan seems solid and well-structured. However, it lacks the inclusion of verifiable information such as source URLs. Here's a revised version:\n",
|
||||
"\n",
|
||||
"1. Engineer: Write a script to scrape the arXiv website for papers related to LLM (Language Model) applications published in the last week. The script should extract the title, authors, abstract, and link to the paper.\n",
|
||||
"\n",
|
||||
"2. Scientist: Review the scraped data to identify the different domains in which LLM is applied. This could be based on keywords in the title or abstract, or the scientist's knowledge of the field.\n",
|
||||
"\n",
|
||||
"3. Engineer: Modify the script to categorize the papers based on the domains identified by the scientist. The script should output a markdown table with columns for the domain, title, authors, abstract, and link (source URL).\n",
|
||||
"\n",
|
||||
"4. Scientist: Review the markdown table to ensure the papers are correctly categorized and the information is accurate. Check the source URLs to verify the information.\n",
|
||||
"\n",
|
||||
"5. Engineer: Make any necessary revisions to the script based on the scientist's feedback.\n",
|
||||
"\n",
|
||||
"6. Scientist: Give final approval of the markdown table, ensuring all entries have a valid source URL.\n",
|
||||
"\n",
|
||||
"7. Engineer: Submit the final markdown table.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
|
||||
"\u001b[33mAdmin\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mEngineer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"I'm sorry for the confusion, but as an AI model developed by OpenAI, I don't have the ability to directly scrape websites or access real-time data from external databases or websites, including arXiv. However, I can help you write a Python script that uses the arXiv API to fetch the data you need.\n",
|
||||
"\n",
|
||||
"Here's a Python script that uses the `arxiv` package to search for papers related to \"LLM applications\" from the last week. This script will print out the title, authors, summary, and URL of each paper.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import arxiv\n",
|
||||
"import datetime\n",
|
||||
"\n",
|
||||
"# Calculate the date one week ago\n",
|
||||
"one_week_ago = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime('%Y%m%d%H%M%S')\n",
|
||||
"\n",
|
||||
"# Search for papers on LLM applications\n",
|
||||
"search = arxiv.Search(\n",
|
||||
" query=\"LLM applications\",\n",
|
||||
" max_results=100,\n",
|
||||
" sort_by=arxiv.SortCriterion.SubmittedDate,\n",
|
||||
" sort_order=arxiv.SortOrder.Descending\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for result in search.get():\n",
|
||||
" # Check if the paper was submitted in the last week\n",
|
||||
" if result.submitted.strftime('%Y%m%d%H%M%S') > one_week_ago:\n",
|
||||
" print(f\"Title: {result.title}\")\n",
|
||||
" print(f\"Authors: {', '.join(author.name for author in result.authors)}\")\n",
|
||||
" print(f\"Summary: {result.summary}\")\n",
|
||||
" print(f\"URL: {result.entry_id}\")\n",
|
||||
" print(\"\\n\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can install the `arxiv` package using pip:\n",
|
||||
"\n",
|
||||
"```shell\n",
|
||||
"pip install arxiv\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Please note that you'll need to replace `\"LLM applications\"` with the actual search term you're interested in. The arXiv API doesn't support searching by the submission date, so the script fetches the 100 most recent results and filters them manually. You may need to increase `max_results` if there are more than 100 papers submitted per week on your topic.\n",
|
||||
"\n",
|
||||
"To create a markdown table, you'll need to format the output like this:\n",
|
||||
"\n",
|
||||
"```markdown\n",
|
||||
"| Title | Authors | Summary | URL |\n",
|
||||
"|-------|---------|---------|-----|\n",
|
||||
"| ... | ... | ... | ... |\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can do this in Python using f-strings:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"print(\"| Title | Authors | Summary | URL |\")\n",
|
||||
"print(\"|-------|---------|---------|-----|\")\n",
|
||||
"for result in search.get():\n",
|
||||
" # Check if the paper was submitted in the last week\n",
|
||||
" if result.submitted.strftime('%Y%m%d%H%M%S') > one_week_ago:\n",
|
||||
" print(f\"| {result.title} | {', '.join(author.name for author in result.authors)} | {result.summary} | {result.entry_id} |\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will print a markdown table to the console. If you want to write the table to a file, you can open a file and use the `write` method instead of `print`.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001b[33mExecutor\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"exitcode: 1 (execution failed)\n",
|
||||
"Code output: \n",
|
||||
":15: DeprecationWarning: The 'get' method is deprecated, use 'results' instead\n",
|
||||
" for result in search.get():\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"\", line 17, in <module>\n",
|
||||
" if result.submitted.strftime('%Y%m%d%H%M%S') > one_week_ago:\n",
|
||||
"AttributeError: 'Result' object has no attribute 'submitted'\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mEngineer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"I apologize for the confusion. It seems there was a mistake in the code. The `arxiv` package's `Result` object doesn't have a `submitted` attribute. Instead, we should use the `published` attribute to get the date when the paper was published.\n",
|
||||
"\n",
|
||||
"Here's the corrected code:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import arxiv\n",
|
||||
"import datetime\n",
|
||||
"\n",
|
||||
"# Calculate the date one week ago\n",
|
||||
"one_week_ago = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime('%Y%m%d%H%M%S')\n",
|
||||
"\n",
|
||||
"# Search for papers on LLM applications\n",
|
||||
"search = arxiv.Search(\n",
|
||||
" query=\"LLM applications\",\n",
|
||||
" max_results=100,\n",
|
||||
" sort_by=arxiv.SortCriterion.Relevance,\n",
|
||||
" sort_order=arxiv.SortOrder.Descending\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"| Title | Authors | Summary | URL |\")\n",
|
||||
"print(\"|-------|---------|---------|-----|\")\n",
|
||||
"for result in search.results():\n",
|
||||
" # Check if the paper was published in the last week\n",
|
||||
" if result.published.strftime('%Y%m%d%H%M%S') > one_week_ago:\n",
|
||||
" print(f\"| {result.title} | {', '.join(author.name for author in result.authors)} | {result.summary} | {result.entry_id} |\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This script will print a markdown table of papers related to \"LLM applications\" that were published in the last week. The table includes the title, authors, summary, and URL of each paper.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001b[33mExecutor\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"exitcode: 0 (execution succeeded)\n",
|
||||
"Code output: \n",
|
||||
"| Title | Authors | Summary | URL |\n",
|
||||
"|-------|---------|---------|-----|\n",
|
||||
"| Large Language Models as Data Preprocessors | Haochen Zhang, Yuyang Dong, Chuan Xiao, Masafumi Oyamada | Large Language Models (LLMs), typified by OpenAI's GPT series and Meta's\n",
|
||||
"LLaMA variants, have marked a significant advancement in artificial\n",
|
||||
"intelligence. Trained on vast amounts of text data, LLMs are capable of\n",
|
||||
"understanding and generating human-like text across a diverse range of topics.\n",
|
||||
"This study expands on the applications of LLMs, exploring their potential in\n",
|
||||
"data preprocessing, a critical stage in data mining and analytics applications.\n",
|
||||
"We delve into the applicability of state-of-the-art LLMs such as GPT-3.5,\n",
|
||||
"GPT-4, and Vicuna-13B for error detection, data imputation, schema matching,\n",
|
||||
"and entity matching tasks. Alongside showcasing the inherent capabilities of\n",
|
||||
"LLMs, we highlight their limitations, particularly in terms of computational\n",
|
||||
"expense and inefficiency. We propose an LLM-based framework for data\n",
|
||||
"preprocessing, which integrates cutting-edge prompt engineering techniques,\n",
|
||||
"coupled with traditional methods like contextualization and feature selection,\n",
|
||||
"to improve the performance and efficiency of these models. The effectiveness of\n",
|
||||
"LLMs in data preprocessing is evaluated through an experimental study spanning\n",
|
||||
"12 datasets. GPT-4 emerged as a standout, achieving 100\\% accuracy or F1 score\n",
|
||||
"on 4 datasets, suggesting LLMs' immense potential in these tasks. Despite\n",
|
||||
"certain limitations, our study underscores the promise of LLMs in this domain\n",
|
||||
"and anticipates future developments to overcome current hurdles. | http://arxiv.org/abs/2308.16361v1 |\n",
|
||||
"| Large language models in medicine: the potentials and pitfalls | Jesutofunmi A. Omiye, Haiwen Gui, Shawheen J. Rezaei, James Zou, Roxana Daneshjou | Large language models (LLMs) have been applied to tasks in healthcare,\n",
|
||||
"ranging from medical exam questions to responding to patient questions. With\n",
|
||||
"increasing institutional partnerships between companies producing LLMs and\n",
|
||||
"healthcare systems, real world clinical application is coming closer to\n",
|
||||
"reality. As these models gain traction, it is essential for healthcare\n",
|
||||
"practitioners to understand what LLMs are, their development, their current and\n",
|
||||
"potential applications, and the associated pitfalls when utilized in medicine.\n",
|
||||
"This review and accompanying tutorial aim to give an overview of these topics\n",
|
||||
"to aid healthcare practitioners in understanding the rapidly changing landscape\n",
|
||||
"of LLMs as applied to medicine. | http://arxiv.org/abs/2309.00087v1 |\n",
|
||||
"| Point-Bind & Point-LLM: Aligning Point Cloud with Multi-modality for 3D Understanding, Generation, and Instruction Following | Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xianzhi Li, Hongsheng Li, Pheng-Ann Heng | We introduce Point-Bind, a 3D multi-modality model aligning point clouds with\n",
|
||||
"2D image, language, audio, and video. Guided by ImageBind, we construct a joint\n",
|
||||
"embedding space between 3D and multi-modalities, enabling many promising\n",
|
||||
"applications, e.g., any-to-3D generation, 3D embedding arithmetic, and 3D\n",
|
||||
"open-world understanding. On top of this, we further present Point-LLM, the\n",
|
||||
"first 3D large language model (LLM) following 3D multi-modal instructions. By\n",
|
||||
"parameter-efficient fine-tuning techniques, Point-LLM injects the semantics of\n",
|
||||
"Point-Bind into pre-trained LLMs, e.g., LLaMA, which requires no 3D instruction\n",
|
||||
"data, but exhibits superior 3D and multi-modal question-answering capacity. We\n",
|
||||
"hope our work may cast a light on the community for extending 3D point clouds\n",
|
||||
"to multi-modality applications. Code is available at\n",
|
||||
"https://github.com/ZiyuGuo99/Point-Bind_Point-LLM. | http://arxiv.org/abs/2309.00615v1 |\n",
|
||||
"| Where Would I Go Next? Large Language Models as Human Mobility Predictors | Xinglei Wang, Meng Fang, Zichao Zeng, Tao Cheng | Accurate human mobility prediction underpins many important applications\n",
|
||||
"across a variety of domains, including epidemic modelling, transport planning,\n",
|
||||
"and emergency responses. Due to the sparsity of mobility data and the\n",
|
||||
"stochastic nature of people's daily activities, achieving precise predictions\n",
|
||||
"of people's locations remains a challenge. While recently developed large\n",
|
||||
"language models (LLMs) have demonstrated superior performance across numerous\n",
|
||||
"language-related tasks, their applicability to human mobility studies remains\n",
|
||||
"unexplored. Addressing this gap, this article delves into the potential of LLMs\n",
|
||||
"for human mobility prediction tasks. We introduce a novel method, LLM-Mob,\n",
|
||||
"which leverages the language understanding and reasoning capabilities of LLMs\n",
|
||||
"for analysing human mobility data. We present concepts of historical stays and\n",
|
||||
"context stays to capture both long-term and short-term dependencies in human\n",
|
||||
"movement and enable time-aware prediction by using time information of the\n",
|
||||
"prediction target. Additionally, we design context-inclusive prompts that\n",
|
||||
"enable LLMs to generate more accurate predictions. Comprehensive evaluations of\n",
|
||||
"our method reveal that LLM-Mob excels in providing accurate and interpretable\n",
|
||||
"predictions, highlighting the untapped potential of LLMs in advancing human\n",
|
||||
"mobility prediction techniques. We posit that our research marks a significant\n",
|
||||
"paradigm shift in human mobility modelling, transitioning from building complex\n",
|
||||
"domain-specific models to harnessing general-purpose LLMs that yield accurate\n",
|
||||
"predictions through language instructions. The code for this work is available\n",
|
||||
"at https://github.com/xlwang233/LLM-Mob. | http://arxiv.org/abs/2308.15197v1 |\n",
|
||||
"| Interactively Robot Action Planning with Uncertainty Analysis and Active Questioning by Large Language Model | Kazuki Hori, Kanata Suzuki, Tetsuya Ogata | The application of the Large Language Model (LLM) to robot action planning\n",
|
||||
"has been actively studied. The instructions given to the LLM by natural\n",
|
||||
"language may include ambiguity and lack of information depending on the task\n",
|
||||
"context. It is possible to adjust the output of LLM by making the instruction\n",
|
||||
"input more detailed; however, the design cost is high. In this paper, we\n",
|
||||
"propose the interactive robot action planning method that allows the LLM to\n",
|
||||
"analyze and gather missing information by asking questions to humans. The\n",
|
||||
"method can minimize the design cost of generating precise robot instructions.\n",
|
||||
"We demonstrated the effectiveness of our method through concrete examples in\n",
|
||||
"cooking tasks. However, our experiments also revealed challenges in robot\n",
|
||||
"action planning with LLM, such as asking unimportant questions and assuming\n",
|
||||
"crucial information without asking. Shedding light on these issues provides\n",
|
||||
"valuable insights for future research on utilizing LLM for robotics. | http://arxiv.org/abs/2308.15684v1 |\n",
|
||||
"| AskIt: Unified Programming Interface for Programming with Large Language Models | Katsumi Okuda, Saman Amarasinghe | In the evolving landscape of software development, Large Language Models\n",
|
||||
"(LLMs) exhibit a unique phenomenon known as emergent abilities, demonstrating\n",
|
||||
"adeptness across numerous tasks, from text summarization to code generation.\n",
|
||||
"While these abilities open up novel avenues in software design and crafting,\n",
|
||||
"their incorporation presents substantial challenges. Developers grapple with\n",
|
||||
"decisions surrounding the direct embedding of LLMs within applications versus\n",
|
||||
"employing them for code generation. Moreover, effective prompt design becomes a\n",
|
||||
"critical concern, given the necessity of data extraction from natural language\n",
|
||||
"outputs. To address these intricacies, this paper introduces AskIt, a\n",
|
||||
"domain-specific language (DSL) specifically designed for LLMs. AskIt simplifies\n",
|
||||
"LLM integration, offering type-guided output control, template-based function\n",
|
||||
"definitions, and a unified interface that diminishes the distinction between\n",
|
||||
"LLM-based code generation and application integration. Furthermore, through\n",
|
||||
"Programming by Example (PBE), AskIt harnesses the power of few-shot learning at\n",
|
||||
"the programming language level. Our evaluations underscore AskIt's potency.\n",
|
||||
"Across 50 tasks, AskIt generated concise prompts for the given tasks, achieving\n",
|
||||
"a 16.14% reduction in prompt length relative to benchmarks. Additionally, by\n",
|
||||
"enabling the transition from direct LLM application usage to function\n",
|
||||
"generation, AskIt achieved significant speedups, as observed in our GSM8K\n",
|
||||
"benchmark experiments. Through these advancements, AskIt streamlines the\n",
|
||||
"integration of LLMs in software development, offering a more efficient,\n",
|
||||
"versatile approach for leveraging emergent abilities. The implementations of\n",
|
||||
"AskIt in TypeScript and Python are available at\n",
|
||||
"https://github.com/katsumiok/ts-askit and https://github.com/katsumiok/pyaskit,\n",
|
||||
"respectively. | http://arxiv.org/abs/2308.15645v1 |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mScientist\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Here are the papers related to LLM applications published in the last week:\n",
|
||||
"\n",
|
||||
"| Domain | Title | Authors | Summary | URL |\n",
|
||||
"|-------|---------|---------|-----|-----|\n",
|
||||
"| Data Preprocessing | [Large Language Models as Data Preprocessors](http://arxiv.org/abs/2308.16361v1) | Haochen Zhang, Yuyang Dong, Chuan Xiao, Masafumi Oyamada | This study expands on the applications of LLMs, exploring their potential in data preprocessing, a critical stage in data mining and analytics applications. | [Link](http://arxiv.org/abs/2308.16361v1) |\n",
|
||||
"| Medicine | [Large language models in medicine: the potentials and pitfalls](http://arxiv.org/abs/2309.00087v1) | Jesutofunmi A. Omiye, Haiwen Gui, Shawheen J. Rezaei, James Zou, Roxana Daneshjou | This review and accompanying tutorial aim to give an overview of these topics to aid healthcare practitioners in understanding the rapidly changing landscape of LLMs as applied to medicine. | [Link](http://arxiv.org/abs/2309.00087v1) |\n",
|
||||
"| 3D Understanding, Generation, and Instruction Following | [Point-Bind & Point-LLM: Aligning Point Cloud with Multi-modality for 3D Understanding, Generation, and Instruction Following](http://arxiv.org/abs/2309.00615v1) | Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xianzhi Li, Hongsheng Li, Pheng-Ann Heng | We introduce Point-Bind, a 3D multi-modality model aligning point clouds with 2D image, language, audio, and video. | [Link](http://arxiv.org/abs/2309.00615v1) |\n",
|
||||
"| Human Mobility Prediction | [Where Would I Go Next? Large Language Models as Human Mobility Predictors](http://arxiv.org/abs/2308.15197v1) | Xinglei Wang, Meng Fang, Zichao Zeng, Tao Cheng | This article delves into the potential of LLMs for human mobility prediction tasks. | [Link](http://arxiv.org/abs/2308.15197v1) |\n",
|
||||
"| Robotics | [Interactively Robot Action Planning with Uncertainty Analysis and Active Questioning by Large Language Model](http://arxiv.org/abs/2308.15684v1) | Kazuki Hori, Kanata Suzuki, Tetsuya Ogata | In this paper, we propose the interactive robot action planning method that allows the LLM to analyze and gather missing information by asking questions to humans. | [Link](http://arxiv.org/abs/2308.15684v1) |\n",
|
||||
"| Software Development | [AskIt: Unified Programming Interface for Programming with Large Language Models](http://arxiv.org/abs/2308.15645v1) | Katsumi Okuda, Saman Amarasinghe | This paper introduces AskIt, a domain-specific language (DSL) specifically designed for LLMs. | [Link](http://arxiv.org/abs/2308.15645v1) |\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mCritic\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"The scientist has done a good job categorizing the papers into different domains and providing a summary for each. The markdown table is correctly formatted and includes the source URL for each paper, which allows for verification of the information. The domains identified are diverse, indicating a broad range of applications for Large Language Models (LLMs). This review and categorization should provide a useful resource for anyone interested in the recent applications of LLMs.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"user_proxy.initiate_chat(\n",
|
||||
" manager,\n",
|
||||
" message=\"\"\"\n",
|
||||
"find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.\n",
|
||||
"\"\"\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Group Chat without Critic for Comparison"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mAdmin\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mPlanner\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Plan:\n",
|
||||
"\n",
|
||||
"1. Engineer: Write a script to scrape the arXiv website for papers related to LLM (Language Model) applications published in the last week. The script should extract the title, authors, abstract, and link to the paper.\n",
|
||||
"\n",
|
||||
"2. Scientist: Review the scraped data to identify the different domains in which LLM is applied. This could be based on keywords in the title or abstract, or the scientist's knowledge of the field.\n",
|
||||
"\n",
|
||||
"3. Engineer: Modify the script to categorize the papers based on the domains identified by the scientist. The script should output a markdown table with columns for the domain, title, authors, abstract, and link.\n",
|
||||
"\n",
|
||||
"4. Scientist: Review the markdown table to ensure the papers are correctly categorized and the information is accurate.\n",
|
||||
"\n",
|
||||
"5. Engineer: Make any necessary revisions to the script based on the scientist's feedback.\n",
|
||||
"\n",
|
||||
"6. Scientist: Give final approval of the markdown table.\n",
|
||||
"\n",
|
||||
"7. Engineer: Submit the final markdown table.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"groupchat_nocritic = autogen.GroupChat(agents=[user_proxy, engineer, scientist, planner, executor], messages=[], max_round=50)\n",
|
||||
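"# reset the agents from the previous run so the no-critic comparison starts from a clean history\n",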
"for agent in groupchat.agents:\n",
|
||||
" agent.reset()\n",
|
||||
"manager_nocritic = autogen.GroupChatManager(groupchat=groupchat_nocritic, llm_config=gpt4_config)\n",
|
||||
"user_proxy.initiate_chat(\n",
|
||||
" manager_nocritic,\n",
|
||||
" message=\"\"\"\n",
|
||||
"find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.\n",
|
||||
"\"\"\",\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "flaml",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.17"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
1038
notebook/autogen_agentchat_groupchat_vis.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -20,7 +20,7 @@
|
||||
"# Auto Generated Agent Chat: Collaborative Task Solving with Coding and Planning Agent\n",
|
||||
"\n",
|
||||
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
|
||||
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
|
||||
"\n",
|
||||
"In this notebook, we demonstrate how to use multiple agents to work together and accomplish a task which requires finding info from the web and coding. `AssistantAgent` is an LLM-based agent that can write and debug Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We further create a planning agent for the assistant agent to consult. The planning agent is a variation of the LLM-based `AssistantAgent` with a different system message.\n",
|
||||
"\n",
|
||||
@@ -45,7 +45,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# %pip install flaml[autogen]~=2.0.1 docker"
|
||||
"# %pip install flaml[autogen]~=2.0.2 docker"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -221,28 +221,31 @@
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33massistant\u001b[0m (to user_proxy):\n",
|
||||
"\n",
|
||||
"To suggest a fix to an open good first issue of FLAML, we first need to fetch the list of open issues labeled as \"good first issue\" from the FLAML GitHub repository. We can do this by using the GitHub API. Here is a Python script that uses the requests library to fetch the list of issues.\n",
|
||||
"To suggest a fix to an open good first issue of FLAML, we first need to fetch the list of open issues labeled as \"good first issue\" from the FLAML GitHub repository. We can do this using the GitHub API.\n",
|
||||
"\n",
|
||||
"Here is a Python script that uses the requests library to fetch the list of open issues labeled as \"good first issue\" from the FLAML GitHub repository.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# python code\n",
|
||||
"# filename: fetch_issues.py\n",
|
||||
"\n",
|
||||
"import requests\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"def fetch_issues():\n",
|
||||
" url = \"https://api.github.com/repos/microsoft/FLAML/issues\"\n",
|
||||
" response = requests.get(url, params={\"state\": \"open\", \"labels\": \"good first issue\"})\n",
|
||||
" params = {\n",
|
||||
" \"state\": \"open\",\n",
|
||||
" \"labels\": \"good first issue\"\n",
|
||||
" }\n",
|
||||
" response = requests.get(url, params=params)\n",
|
||||
" issues = response.json()\n",
|
||||
"\n",
|
||||
" for issue in issues:\n",
|
||||
" print(f\"Issue ID: {issue['id']}\")\n",
|
||||
" print(f\"Issue Title: {issue['title']}\")\n",
|
||||
" print(f\"Issue URL: {issue['html_url']}\")\n",
|
||||
" print(\"\\n\")\n",
|
||||
" print(f\"Issue ID: {issue['id']}, Title: {issue['title']}, URL: {issue['html_url']}\")\n",
|
||||
"\n",
|
||||
"fetch_issues()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This script will print the ID, title, and URL of each open issue labeled as \"good first issue\". You can run this script to get the list of issues. After that, I can help you to suggest a fix for one of the issues.\n",
|
||||
"Please run this script to fetch the list of open issues. After that, we can select one issue and suggest a fix for it.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
@@ -253,106 +256,26 @@
|
||||
"\n",
|
||||
"exitcode: 0 (execution succeeded)\n",
|
||||
"Code output: \n",
|
||||
"Issue ID: 1809297895\n",
|
||||
"Issue Title: Moving function execution out of UserProxyAgent to be an openai util\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/1135\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1799114476\n",
|
||||
"Issue Title: use_label_encoder warning with xgboost\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/1120\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1705274482\n",
|
||||
"Issue Title: Use appropriate wait time for retry based on the error message. \n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/1034\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1702580697\n",
|
||||
"Issue Title: Issues with Adding Custom APIs in Auto Generation\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/1029\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1658981020\n",
|
||||
"Issue Title: Running flaml[tune] using \"-O\" flag for python interpreter (optimization - disables assertions) crashes\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/981\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1560969891\n",
|
||||
"Issue Title: Conditional parameter flow2 crash\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/903\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1538549388\n",
|
||||
"Issue Title: indentation space\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/884\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1531028010\n",
|
||||
"Issue Title: Check if openml version is required\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/882\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1470354491\n",
|
||||
"Issue Title: Adjust the indent\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/834\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1456950742\n",
|
||||
"Issue Title: pip install flaml FAIL\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/821\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1441047067\n",
|
||||
"Issue Title: Isolate the ensemble part and expose it to users\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/807\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Issue ID: 1440171793\n",
|
||||
"Issue Title: how to pass categorical features names or indices to learner\n",
|
||||
"Issue URL: https://github.com/microsoft/FLAML/issues/805\n",
"\n",
"\n",
"Issue ID: 1429945686\n",
"Issue Title: Flaml/LightGBM - Shouldn't I found better/faster or equal results from FLAML than direct LightGBM?\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/785\n",
"\n",
"\n",
"Issue ID: 1408240042\n",
"Issue Title: Add an announcement of the discord channel\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/764\n",
"\n",
"\n",
"Issue ID: 1396515109\n",
"Issue Title: Documentation about small budget\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/748\n",
"\n",
"\n",
"Issue ID: 1378268096\n",
"Issue Title: Make zero-shot automl more discoverable\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/737\n",
"\n",
"\n",
"Issue ID: 1189515901\n",
"Issue Title: New HCrystalBall release\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/509\n",
"\n",
"\n",
"Issue ID: 1114253143\n",
"Issue Title: samples about conversion to ONNX\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/429\n",
"\n",
"\n",
"Issue ID: 1107488969\n",
"Issue Title: support anomaly detection\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/413\n",
"\n",
"\n",
"Issue ID: 1061332179\n",
"Issue Title: CatBoost Fails with Keyword 'groups'\n",
"Issue URL: https://github.com/microsoft/FLAML/issues/304\n",
"\n",
"\n",
"Issue ID: 1809297895, Title: Moving function execution out of UserProxyAgent to be an openai util, URL: https://github.com/microsoft/FLAML/issues/1135\n",
"Issue ID: 1799114476, Title: use_label_encoder warning with xgboost, URL: https://github.com/microsoft/FLAML/issues/1120\n",
"Issue ID: 1705274482, Title: Use appropriate wait time for retry based on the error message. , URL: https://github.com/microsoft/FLAML/issues/1034\n",
"Issue ID: 1702580697, Title: Issues with Adding Custom APIs in Auto Generation, URL: https://github.com/microsoft/FLAML/issues/1029\n",
"Issue ID: 1658981020, Title: Running flaml[tune] using \"-O\" flag for python interpreter (optimization - disables assertions) crashes, URL: https://github.com/microsoft/FLAML/issues/981\n",
"Issue ID: 1560969891, Title: Conditional parameter flow2 crash, URL: https://github.com/microsoft/FLAML/issues/903\n",
"Issue ID: 1538549388, Title: indentation space, URL: https://github.com/microsoft/FLAML/issues/884\n",
"Issue ID: 1531028010, Title: Check if openml version is required, URL: https://github.com/microsoft/FLAML/issues/882\n",
"Issue ID: 1470354491, Title: Adjust the indent, URL: https://github.com/microsoft/FLAML/issues/834\n",
"Issue ID: 1456950742, Title: pip install flaml FAIL, URL: https://github.com/microsoft/FLAML/issues/821\n",
"Issue ID: 1441047067, Title: Isolate the ensemble part and expose it to users, URL: https://github.com/microsoft/FLAML/issues/807\n",
"Issue ID: 1440171793, Title: how to pass categorical features names or indices to learner, URL: https://github.com/microsoft/FLAML/issues/805\n",
"Issue ID: 1429945686, Title: Flaml/LightGBM - Shouldn't I found better/faster or equal results from FLAML than direct LightGBM?, URL: https://github.com/microsoft/FLAML/issues/785\n",
"Issue ID: 1408240042, Title: Add an announcement of the discord channel, URL: https://github.com/microsoft/FLAML/issues/764\n",
"Issue ID: 1396515109, Title: Documentation about small budget, URL: https://github.com/microsoft/FLAML/issues/748\n",
"Issue ID: 1378268096, Title: Make zero-shot automl more discoverable, URL: https://github.com/microsoft/FLAML/issues/737\n",
"Issue ID: 1189515901, Title: New HCrystalBall release, URL: https://github.com/microsoft/FLAML/issues/509\n",
"Issue ID: 1114253143, Title: samples about conversion to ONNX, URL: https://github.com/microsoft/FLAML/issues/429\n",
"Issue ID: 1107488969, Title: support anomaly detection, URL: https://github.com/microsoft/FLAML/issues/413\n",
"Issue ID: 1061332179, Title: CatBoost Fails with Keyword 'groups', URL: https://github.com/microsoft/FLAML/issues/304\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
@@ -361,7 +284,7 @@
"\u001b[32m***** Suggested function Call: ask_planner *****\u001b[0m\n",
"Arguments: \n",
"{\n",
"\"message\": \"Here are the open good first issues of FLAML. Please suggest a plan to fix one of them. The issues are: \\n\\n1. [Moving function execution out of UserProxyAgent to be an openai util](https://github.com/microsoft/FLAML/issues/1135)\\n2. [use_label_encoder warning with xgboost](https://github.com/microsoft/FLAML/issues/1120)\\n3. [Use appropriate wait time for retry based on the error message](https://github.com/microsoft/FLAML/issues/1034)\\n4. [Issues with Adding Custom APIs in Auto Generation](https://github.com/microsoft/FLAML/issues/1029)\\n5. [Running flaml[tune] using \\\"-O\\\" flag for python interpreter (optimization - disables assertions) crashes](https://github.com/microsoft/FLAML/issues/981)\\n6. [Conditional parameter flow2 crash](https://github.com/microsoft/FLAML/issues/903)\\n7. [indentation space](https://github.com/microsoft/FLAML/issues/884)\\n8. [Check if openml version is required](https://github.com/microsoft/FLAML/issues/882)\\n9. [Adjust the indent](https://github.com/microsoft/FLAML/issues/834)\\n10. [pip install flaml FAIL](https://github.com/microsoft/FLAML/issues/821)\\n11. [Isolate the ensemble part and expose it to users](https://github.com/microsoft/FLAML/issues/807)\\n12. [how to pass categorical features names or indices to learner](https://github.com/microsoft/FLAML/issues/805)\\n13. [Flaml/LightGBM - Shouldn't I found better/faster or equal results from FLAML than direct LightGBM?](https://github.com/microsoft/FLAML/issues/785)\\n14. [Add an announcement of the discord channel](https://github.com/microsoft/FLAML/issues/764)\\n15. [Documentation about small budget](https://github.com/microsoft/FLAML/issues/748)\\n16. [Make zero-shot automl more discoverable](https://github.com/microsoft/FLAML/issues/737)\\n17. [New HCrystalBall release](https://github.com/microsoft/FLAML/issues/509)\\n18. [samples about conversion to ONNX](https://github.com/microsoft/FLAML/issues/429)\\n19. [support anomaly detection](https://github.com/microsoft/FLAML/issues/413)\\n20. [CatBoost Fails with Keyword 'groups'](https://github.com/microsoft/FLAML/issues/304)\"\n",
"\"message\": \"We have fetched a list of open issues labeled as 'good first issue' from the FLAML GitHub repository. Now, we need to select one issue and suggest a fix for it. Could you please provide a plan for this?\"\n",
"}\n",
"\u001b[32m************************************************\u001b[0m\n",
"\n",
@@ -372,115 +295,92 @@
">>>>>>>> EXECUTING FUNCTION ask_planner...\u001b[0m\n",
"\u001b[33mplanner_user\u001b[0m (to planner):\n",
"\n",
"Here are the open good first issues of FLAML. Please suggest a plan to fix one of them. The issues are: \n",
"\n",
"1. [Moving function execution out of UserProxyAgent to be an openai util](https://github.com/microsoft/FLAML/issues/1135)\n",
"2. [use_label_encoder warning with xgboost](https://github.com/microsoft/FLAML/issues/1120)\n",
"3. [Use appropriate wait time for retry based on the error message](https://github.com/microsoft/FLAML/issues/1034)\n",
"4. [Issues with Adding Custom APIs in Auto Generation](https://github.com/microsoft/FLAML/issues/1029)\n",
"5. [Running flaml[tune] using \"-O\" flag for python interpreter (optimization - disables assertions) crashes](https://github.com/microsoft/FLAML/issues/981)\n",
"6. [Conditional parameter flow2 crash](https://github.com/microsoft/FLAML/issues/903)\n",
"7. [indentation space](https://github.com/microsoft/FLAML/issues/884)\n",
"8. [Check if openml version is required](https://github.com/microsoft/FLAML/issues/882)\n",
"9. [Adjust the indent](https://github.com/microsoft/FLAML/issues/834)\n",
"10. [pip install flaml FAIL](https://github.com/microsoft/FLAML/issues/821)\n",
"11. [Isolate the ensemble part and expose it to users](https://github.com/microsoft/FLAML/issues/807)\n",
"12. [how to pass categorical features names or indices to learner](https://github.com/microsoft/FLAML/issues/805)\n",
"13. [Flaml/LightGBM - Shouldn't I found better/faster or equal results from FLAML than direct LightGBM?](https://github.com/microsoft/FLAML/issues/785)\n",
"14. [Add an announcement of the discord channel](https://github.com/microsoft/FLAML/issues/764)\n",
"15. [Documentation about small budget](https://github.com/microsoft/FLAML/issues/748)\n",
"16. [Make zero-shot automl more discoverable](https://github.com/microsoft/FLAML/issues/737)\n",
"17. [New HCrystalBall release](https://github.com/microsoft/FLAML/issues/509)\n",
"18. [samples about conversion to ONNX](https://github.com/microsoft/FLAML/issues/429)\n",
"19. [support anomaly detection](https://github.com/microsoft/FLAML/issues/413)\n",
"20. [CatBoost Fails with Keyword 'groups'](https://github.com/microsoft/FLAML/issues/304)\n",
"We have fetched a list of open issues labeled as 'good first issue' from the FLAML GitHub repository. Now, we need to select one issue and suggest a fix for it. Could you please provide a plan for this?\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mplanner\u001b[0m (to planner_user):\n",
"\n",
"Let's try to fix the \"use_label_encoder warning with xgboost\" issue from the list. The plan involves:\n",
"Sure, here's a plan for selecting one issue from the list and suggesting a fix for it:\n",
"\n",
"1. Investigate why the warning is raised:\n",
"Go through the FLAML repository's source code, specifically the part that deals with xgboost implementation. Check if the 'use_label_encoder' parameter is actually used during model training. If yes, inspect which value it has been assigned and try to find out why this parameter may be causing the warning.\n",
"1. Import the fetched list of open issues labeled as 'good first issue' from the FLAML GitHub repository into your AI assistant. \n",
"2. Examine the list for common issue attributes like 'title', 'description', 'labels', 'issue number', 'created at', and 'updated at'. \n",
"3. To select a suitable issue for fixing, apply a selection criteria based on your preferences, such as prioritizing by the 'created at' attribute in descending order to pick the most recent issue, or filtering by a specific label in addition to 'good first issue'. Write code to filter and sort the issues accordingly.\n",
"4. Inspect the execution result. If the selection criteria are not applied correctly, modify the code to fix any errors.\n",
"5. Once the issue is selected, read the issue's title, description, and any linked resources or documents to understand the problem to be solved.\n",
"6. Break down the issue into smaller tasks that can be addressed by writing code, and create a step-by-step plan.\n",
"\n",
"2. Understand the role of use_label_encoder in XGBoost:\n",
"Understand what the 'use_label_encoder' parameter does and why it would be important to XGBoost. This might require studying XGBoost's documentation and community discussions, particularly any discussions related to this warning.\n",
"For instance, the following could be smaller tasks to address the selected issue:\n",
"   a. Understand the issue's background and requirements.\n",
"   b. Write clear and concise instructions to reproduce the issue.\n",
"   c. Analyze existing code or tests related to the issue.\n",
"   d. Devise a solution to fix the issue.\n",
"   e. Implement the solution in separate code pieces.\n",
"   f. Verify that the solution addresses the issue.\n",
"   g. Write unit tests to ensure the solution is robust and handles edge cases.\n",
"\n",
"3. Suggest modification safely without affecting model performance and other functionalities:\n",
"Once you understand the role and importance of the 'use_label_encoder parameter', think about how you could modify its usage in FLAML's XGBoost implementation to prevent the warning. The plan you design should preserve the current functionalities and not negatively impact the model's performance.\n",
"7. Inspect the execution result. If the issue is misunderstood or the tasks' breakdown is incorrect, revise the understanding of the issue and modify the tasks accordingly.\n",
"8. With the defined tasks and step-by-step plan, work on each task, and test the implemented code to ensure the issue is solved.\n",
"9. If any issues arise during the task execution, analyze the errors and adjust the plan or code accordingly.\n",
"10. Once the issue is fixed, prepare a pull request on GitHub, mentioning the issue number and giving a brief description of the solution in the merge request.\n",
"\n",
"4. Implement the plan:\n",
"Once you have a modification plan, implement it in the code. Ensure to follow any code style guides or standards set by the FLAML project.\n",
"\n",
"5. Test the changes:\n",
"After changing your code, thoroughly test it to make sure the warning is no longer appearing and that your changes haven't caused any other issues. This involves running existing unit tests and creating new tests if necessary.\n",
"\n",
"6. Evaluate the change:\n",
"Check again to ensure that model performance and functionality haven't been negatively affected. \n",
"\n",
"7. Create a Pull Request:\n",
"Having made your changes and ensured everything is working correctly, submit your modification as a pull request to the original FLAML repository and follow any steps they have for the contribution process. \n",
"\n",
"8. Respond to review:\n",
"The reviewers may have comments or require changes, be ready to address any that come up until the solution is accepted. \n",
"\n",
"Please note that each step in this plan requires coding and/or reasoning, which are critical to an AI assistant's operations.\n",
"Remember that this is meant to be a general plan, and the specific tasks may vary depending on the selected issue. Adjust the plan as needed, based on the selected issue's requirements and your problem-solving approach.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33muser_proxy\u001b[0m (to assistant):\n",
"\n",
"\u001b[32m***** Response from calling function \"ask_planner\" *****\u001b[0m\n",
"Let's try to fix the \"use_label_encoder warning with xgboost\" issue from the list. The plan involves:\n",
"Sure, here's a plan for selecting one issue from the list and suggesting a fix for it:\n",
"\n",
"1. Investigate why the warning is raised:\n",
"Go through the FLAML repository's source code, specifically the part that deals with xgboost implementation. Check if the 'use_label_encoder' parameter is actually used during model training. If yes, inspect which value it has been assigned and try to find out why this parameter may be causing the warning.\n",
"1. Import the fetched list of open issues labeled as 'good first issue' from the FLAML GitHub repository into your AI assistant. \n",
"2. Examine the list for common issue attributes like 'title', 'description', 'labels', 'issue number', 'created at', and 'updated at'. \n",
"3. To select a suitable issue for fixing, apply a selection criteria based on your preferences, such as prioritizing by the 'created at' attribute in descending order to pick the most recent issue, or filtering by a specific label in addition to 'good first issue'. Write code to filter and sort the issues accordingly.\n",
"4. Inspect the execution result. If the selection criteria are not applied correctly, modify the code to fix any errors.\n",
"5. Once the issue is selected, read the issue's title, description, and any linked resources or documents to understand the problem to be solved.\n",
"6. Break down the issue into smaller tasks that can be addressed by writing code, and create a step-by-step plan.\n",
"\n",
"2. Understand the role of use_label_encoder in XGBoost:\n",
"Understand what the 'use_label_encoder' parameter does and why it would be important to XGBoost. This might require studying XGBoost's documentation and community discussions, particularly any discussions related to this warning.\n",
"For instance, the following could be smaller tasks to address the selected issue:\n",
"   a. Understand the issue's background and requirements.\n",
"   b. Write clear and concise instructions to reproduce the issue.\n",
"   c. Analyze existing code or tests related to the issue.\n",
"   d. Devise a solution to fix the issue.\n",
"   e. Implement the solution in separate code pieces.\n",
"   f. Verify that the solution addresses the issue.\n",
"   g. Write unit tests to ensure the solution is robust and handles edge cases.\n",
"\n",
"3. Suggest modification safely without affecting model performance and other functionalities:\n",
"Once you understand the role and importance of the 'use_label_encoder parameter', think about how you could modify its usage in FLAML's XGBoost implementation to prevent the warning. The plan you design should preserve the current functionalities and not negatively impact the model's performance.\n",
"7. Inspect the execution result. If the issue is misunderstood or the tasks' breakdown is incorrect, revise the understanding of the issue and modify the tasks accordingly.\n",
"8. With the defined tasks and step-by-step plan, work on each task, and test the implemented code to ensure the issue is solved.\n",
"9. If any issues arise during the task execution, analyze the errors and adjust the plan or code accordingly.\n",
"10. Once the issue is fixed, prepare a pull request on GitHub, mentioning the issue number and giving a brief description of the solution in the merge request.\n",
"\n",
"4. Implement the plan:\n",
"Once you have a modification plan, implement it in the code. Ensure to follow any code style guides or standards set by the FLAML project.\n",
"\n",
"5. Test the changes:\n",
"After changing your code, thoroughly test it to make sure the warning is no longer appearing and that your changes haven't caused any other issues. This involves running existing unit tests and creating new tests if necessary.\n",
"\n",
"6. Evaluate the change:\n",
"Check again to ensure that model performance and functionality haven't been negatively affected. \n",
"\n",
"7. Create a Pull Request:\n",
"Having made your changes and ensured everything is working correctly, submit your modification as a pull request to the original FLAML repository and follow any steps they have for the contribution process. \n",
"\n",
"8. Respond to review:\n",
"The reviewers may have comments or require changes, be ready to address any that come up until the solution is accepted. \n",
"\n",
"Please note that each step in this plan requires coding and/or reasoning, which are critical to an AI assistant's operations.\n",
"Remember that this is meant to be a general plan, and the specific tasks may vary depending on the selected issue. Adjust the plan as needed, based on the selected issue's requirements and your problem-solving approach.\n",
"\u001b[32m********************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to user_proxy):\n",
"\n",
"Based on the plan provided, here are the steps to fix the \"use_label_encoder warning with xgboost\" issue:\n",
"Based on the plan, let's select the most recent issue from the list. The most recent issue is \"Moving function execution out of UserProxyAgent to be an openai util\" with the URL: https://github.com/microsoft/FLAML/issues/1135\n",
"\n",
"1. **Investigate why the warning is raised**: This step involves going through the FLAML repository's source code, specifically the part that deals with xgboost implementation. You need to check if the 'use_label_encoder' parameter is actually used during model training. If yes, inspect which value it has been assigned and try to find out why this parameter may be causing the warning.\n",
"To suggest a fix for this issue, we need to understand the problem first. Please visit the issue URL and read the issue's description and any comments to understand the problem.\n",
"\n",
"2. **Understand the role of use_label_encoder in XGBoost**: This step requires studying XGBoost's documentation and community discussions, particularly any discussions related to this warning. You need to understand what the 'use_label_encoder' parameter does and why it would be important to XGBoost.\n",
"Once you understand the problem, here are the steps you can follow to suggest a fix:\n",
"\n",
"3. **Suggest modification safely without affecting model performance and other functionalities**: Once you understand the role and importance of the 'use_label_encoder parameter', you need to think about how you could modify its usage in FLAML's XGBoost implementation to prevent the warning. The plan you design should preserve the current functionalities and not negatively impact the model's performance.\n",
"1. Understand the issue's background and requirements: Read the issue description and any linked resources or documents to understand what needs to be done.\n",
"\n",
"4. **Implement the plan**: Once you have a modification plan, you need to implement it in the code. You should follow any code style guides or standards set by the FLAML project.\n",
"2. Write clear and concise instructions to reproduce the issue: Based on the issue description, write down the steps to reproduce the issue. This will help you understand the problem better and will be useful when you test your fix.\n",
"\n",
"5. **Test the changes**: After changing your code, you need to thoroughly test it to make sure the warning is no longer appearing and that your changes haven't caused any other issues. This involves running existing unit tests and creating new tests if necessary.\n",
"3. Analyze existing code or tests related to the issue: Look at the code in the FLAML repository that is related to the issue. Try to understand how it works and how it might be causing the issue.\n",
"\n",
"6. **Evaluate the change**: You need to check again to ensure that model performance and functionality haven't been negatively affected.\n",
"4. Devise a solution to fix the issue: Based on your understanding of the problem and the code, come up with a solution to fix the issue. This might involve writing new code or modifying existing code.\n",
"\n",
"7. **Create a Pull Request**: Having made your changes and ensured everything is working correctly, you should submit your modification as a pull request to the original FLAML repository and follow any steps they have for the contribution process.\n",
"5. Implement the solution in separate code pieces: Write the code for your solution. Make sure to break it down into small, manageable pieces. This will make it easier to understand and test.\n",
"\n",
"8. **Respond to review**: The reviewers may have comments or require changes, you should be ready to address any that come up until the solution is accepted.\n",
"6. Verify that the solution addresses the issue: Test your code to make sure it solves the issue. You can do this by following the instructions you wrote to reproduce the issue.\n",
"\n",
"Please note that each step in this plan requires coding and/or reasoning, which are critical to an AI assistant's operations. \n",
"7. Write unit tests to ensure the solution is robust and handles edge cases: In addition to testing your solution manually, write unit tests to automatically test your code. This will help ensure that your solution works in all cases and that future changes to the code won't break your solution.\n",
"\n",
"8. Prepare a pull request on GitHub: Once you're confident that your solution works, prepare a pull request on GitHub. In the pull request description, mention the issue number and give a brief description of your solution.\n",
"\n",
"Please note that this is a general plan and the specific steps may vary depending on the issue. Adjust the plan as needed based on the issue's requirements and your problem-solving approach. \n",
"\n",
"TERMINATE\n",
"\n",
@@ -20,7 +20,7 @@
"# Interactive LLM Agent Dealing with Data Stream\n",
"\n",
"`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
"\n",
"In this notebook, we demonstrate how to use customized agents to continuously acquire news from the web and ask for investment suggestions.\n",
"\n",
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"# %pip install flaml[autogen]~=2.0.0"
"# %pip install flaml[autogen]~=2.1.0"
]
},
{
@@ -258,7 +258,7 @@
"    )\n",
"    return False, None\n",
"\n",
"user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, config={\"news_stream\": data})"
"user_proxy.register_reply(autogen.AssistantAgent, add_data_reply, 1, config={\"news_stream\": data})"
]
},
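The hunk above renames `register_auto_reply` to `register_reply`, matching the `flaml[autogen]~=2.1.0` pin earlier in this diff. For orientation, here is a sketch of how a custom reply function such as `add_data_reply` plugs into that API; only the signature convention, the `(False, None)` fall-through, and the registration call are taken from the diff, while the queue-based body is illustrative:

```python
import queue

from flaml import autogen

data = queue.Queue()  # stand-in for the notebook's news stream

def add_data_reply(recipient, messages=None, sender=None, config=None):
    # Reply functions follow autogen's convention: return (is_final, reply).
    news_stream = config["news_stream"]
    if news_stream.empty():
        return False, None  # nothing new: fall through to the remaining reply functions
    return True, f"Latest news: {news_stream.get()}"

user_proxy = autogen.UserProxyAgent(name="user_proxy", human_input_mode="NEVER")
# Trigger on messages from any AssistantAgent, at position 1 in the reply-function list.
user_proxy.register_reply(autogen.AssistantAgent, add_data_reply, 1, config={"news_stream": data})
```

Returning `(False, None)` lets the message fall through to the agent's other registered replies, while `(True, reply)` short-circuits the chain with `reply`.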
{
Some files were not shown because too many files have changed in this diff.