diff --git a/BUILDING.md b/BUILDING.md index 0e0379265f2eb0..afa373df7998a9 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -1,61 +1,61 @@ # Building Node.js -Depending on what platform or features you need, the build process may -differ. After you've built a binary, running the -test suite to confirm that the binary works as intended is a good next step. +Depending on what platform or features you need, the build process may differ. +After you've built a binary, running the test suite to confirm that the binary +works as intended is a good next step. If you can reproduce a test failure, search for it in the -[Node.js issue tracker](https://github.com/nodejs/node/issues) or -file a new issue. +[Node.js issue tracker](https://github.com/nodejs/node/issues) or file a new +issue. ## Table of Contents -* [Supported platforms](#supported-platforms) - * [Input](#input) - * [Strategy](#strategy) - * [Platform list](#platform-list) - * [Supported toolchains](#supported-toolchains) - * [Official binary platforms and toolchains](#official-binary-platforms-and-toolchains) - * [OpenSSL asm support](#openssl-asm-support) - * [Previous versions of this document](#previous-versions-of-this-document) -* [Building Node.js on supported platforms](#building-nodejs-on-supported-platforms) - * [Note about Python 2 and Python 3](#note-about-python-2-and-python-3) - * [Unix and macOS](#unix-and-macos) - * [Unix prerequisites](#unix-prerequisites) - * [macOS prerequisites](#macos-prerequisites) - * [Building Node.js](#building-nodejs) - * [Running Tests](#running-tests) - * [Running Coverage](#running-coverage) - * [Building the documentation](#building-the-documentation) - * [Building a debug build](#building-a-debug-build) - * [Windows](#windows) - * [Prerequisites](#prerequisites) - * [Option 1: Manual install](#option-1-manual-install) - * [Option 1: Automated install with Boxstarter](#option-1-automated-install-with-boxstarter) - * [Building Node.js](#building-nodejs-1) - * 
[Android/Android-based devices (e.g. Firefox OS)](#androidandroid-based-devices-eg-firefox-os) -* [`Intl` (ECMA-402) support](#intl-ecma-402-support) - * [Default: `small-icu` (English only) support](#default-small-icu-english-only-support) - * [Build with full ICU support (all locales supported by ICU)](#build-with-full-icu-support-all-locales-supported-by-icu) - * [Unix/macOS](#unixmacos) - * [Windows](#windows-1) - * [Building without Intl support](#building-without-intl-support) - * [Unix/macOS](#unixmacos-1) - * [Windows](#windows-2) - * [Use existing installed ICU (Unix/macOS only)](#use-existing-installed-icu-unixmacOS-only) - * [Build with a specific ICU](#build-with-a-specific-icu) - * [Unix/macOS](#unixmacos-2) - * [Windows](#windows-3) -* [Building Node.js with FIPS-compliant OpenSSL](#building-nodejs-with-fips-compliant-openssl) -* [Building Node.js with external core modules](#building-nodejs-with-external-core-modules) - * [Unix/macOS](#unixmacos-3) - * [Windows](#windows-4) -* [Note for downstream distributors of Node.js](#note-for-downstream-distributors-of-nodejs) +- [Supported platforms](#supported-platforms) + - [Input](#input) + - [Strategy](#strategy) + - [Platform list](#platform-list) + - [Supported toolchains](#supported-toolchains) + - [Official binary platforms and toolchains](#official-binary-platforms-and-toolchains) + - [OpenSSL asm support](#openssl-asm-support) + - [Previous versions of this document](#previous-versions-of-this-document) +- [Building Node.js on supported platforms](#building-nodejs-on-supported-platforms) + - [Note about Python 2 and Python 3](#note-about-python-2-and-python-3) + - [Unix and macOS](#unix-and-macos) + - [Unix prerequisites](#unix-prerequisites) + - [macOS prerequisites](#macos-prerequisites) + - [Building Node.js](#building-nodejs) + - [Running Tests](#running-tests) + - [Running Coverage](#running-coverage) + - [Building the documentation](#building-the-documentation) + - [Building a debug 
build](#building-a-debug-build) + - [Windows](#windows) + - [Prerequisites](#prerequisites) + - [Option 1: Manual install](#option-1-manual-install) + - [Option 2: Automated install with Boxstarter](#option-2-automated-install-with-boxstarter) + - [Building Node.js](#building-nodejs-1) + - [Android/Android-based devices (e.g. Firefox OS)](#androidandroid-based-devices-eg-firefox-os) +- [`Intl` (ECMA-402) support](#intl-ecma-402-support) + - [Default: `small-icu` (English only) support](#default-small-icu-english-only-support) + - [Build with full ICU support (all locales supported by ICU)](#build-with-full-icu-support-all-locales-supported-by-icu) + - [Unix/macOS](#unixmacos) + - [Windows](#windows-1) + - [Building without Intl support](#building-without-intl-support) + - [Unix/macOS](#unixmacos-1) + - [Windows](#windows-2) + - [Use existing installed ICU (Unix/macOS only)](#use-existing-installed-icu-unixmacos-only) + - [Build with a specific ICU](#build-with-a-specific-icu) + - [Unix/macOS](#unixmacos-2) + - [Windows](#windows-3) +- [Building Node.js with FIPS-compliant OpenSSL](#building-nodejs-with-fips-compliant-openssl) +- [Building Node.js with external core modules](#building-nodejs-with-external-core-modules) + - [Unix/macOS](#unixmacos-3) + - [Windows](#windows-4) +- [Note for downstream distributors of Node.js](#note-for-downstream-distributors-of-nodejs) ## Supported platforms -This list of supported platforms is current as of the branch/release to -which it belongs. +This list of supported platforms is current as of the branch/release to which it +belongs. ### Input @@ -65,20 +65,20 @@ Node.js relies on V8 and libuv. We adopt a subset of their supported platforms. There are three support tiers: -* **Tier 1**: These platforms represent the majority of Node.js users. The +- **Tier 1**: These platforms represent the majority of Node.js users. The Node.js Build Working Group maintains infrastructure for full test coverage. 
- Maintenance is supported by the Node.js core team. All commits to the - Node.js repository are tested on multiple variants of these platforms. Test - failures on tier 1 platforms will block releases. -* **Tier 2**: These platforms represent smaller segments of the Node.js user + Maintenance is supported by the Node.js core team. All commits to the Node.js + repository are tested on multiple variants of these platforms. Test failures + on tier 1 platforms will block releases. +- **Tier 2**: These platforms represent smaller segments of the Node.js user base. The Node.js Build Working Group maintains infrastructure for full test - coverage. Maintenance is supported by smaller groups or individuals within - the Node.js core team, or the vendor of the platform itself. All commits to - the Node.js repository are tested on multiple variants of these platforms - where practical. Test failures on tier 2 platforms will block releases. - Delays in release of binaries for these platforms are acceptable - where necessary due to infrastructure concerns. -* **Experimental**: May not compile or test suite may not pass. The core team + coverage. Maintenance is supported by smaller groups or individuals within the + Node.js core team, or the vendor of the platform itself. All commits to the + Node.js repository are tested on multiple variants of these platforms where + practical. Test failures on tier 2 platforms will block releases. Delays in + release of binaries for these platforms are acceptable where necessary due to + infrastructure concerns. +- **Experimental**: May not compile or test suite may not pass. The core team does not create releases for these platforms. Test failures on experimental platforms do not block releases. Contributions to improve support for these platforms are welcome. @@ -89,67 +89,68 @@ will be updated to reflect those changes. 
### Platform list Compiling and running Node.js is supported for a limited set of operating -systems, architectures and libc versions. The table below lists the -combinations that the core team has committed to supporting and the nature of -that support as per the support tiers above. A list of -[supported compile toolchains](#supported-toolchains) is also supplied for -tier 1 platforms. +systems, architectures and libc versions. The table below lists the combinations +that the core team has committed to supporting and the nature of that support as +per the support tiers above. A list of +[supported compile toolchains](#supported-toolchains) is also supplied for tier +1 platforms. **For production applications, run Node.js on supported platforms only.** -Node.js does not support a platform version if a vendor has expired support -for it. In other words, Node.js does not support running on End-of-Life (EoL) +Node.js does not support a platform version if a vendor has expired support for +it. In other words, Node.js does not support running on End-of-Life (EoL) platforms. This is true regardless of entries in the table below. -| Operating System | Architectures | Versions | Support Type | Notes | -| ---------------- | ---------------- | ------------------------------- | ------------ | --------------------------------- | -| GNU/Linux | x64 | kernel >= 3.10, glibc >= 2.17 | Tier 1 | e.g. Ubuntu 16.04 [1](#fn1), Debian 9, EL 7 [2](#fn2) | -| GNU/Linux | x64 | kernel >= 3.10, musl >= 1.1.19 | Experimental | e.g. Alpine 3.8 | -| GNU/Linux | x86 | kernel >= 3.10, glibc >= 2.17 | Experimental | Downgraded as of Node.js 10 | -| GNU/Linux | arm64 | kernel >= 4.5, glibc >= 2.17 | Tier 1 | e.g. Ubuntu 16.04, Debian 9, EL 7 [3](#fn3) | -| GNU/Linux | armv7 | kernel >= 4.14, glibc >= 2.24 | Tier 1 | e.g. 
Ubuntu 18.04, Debian 9 | -| GNU/Linux | armv6 | kernel >= 4.14, glibc >= 2.24 | Experimental | Downgraded as of Node.js 12 | -| GNU/Linux | ppc64le >=power8 | kernel >= 3.10.0, glibc >= 2.17 | Tier 2 | e.g. Ubuntu 16.04 [1](#fn1), EL 7 [2](#fn2) | -| GNU/Linux | s390x | kernel >= 3.10.0, glibc >= 2.17 | Tier 2 | e.g. EL 7 [2](#fn2) | -| Windows | x64, x86 (WoW64) | >= Windows 7/2008 R2/2012 R2 | Tier 1 | [4](#fn4),[5](#fn5) | -| Windows | x86 (native) | >= Windows 7/2008 R2/2012 R2 | Tier 1 (running) / Experimental (compiling) [6](#fn6) | | -| Windows | arm64 | >= Windows 10 | Experimental | | -| macOS | x64 | >= 10.11 | Tier 1 | | -| SmartOS | x64 | >= 18 | Tier 2 | | -| AIX | ppc64be >=power7 | >= 7.2 TL02 | Tier 2 | | -| FreeBSD | x64 | >= 11 | Experimental | Downgraded as of Node.js 12 | - -1: GCC 6 is not provided on the base platform, users will - need the - [Toolchain test builds PPA](https://launchpad.net/~ubuntu-toolchain-r/+archive/ubuntu/test?field.series_filter=xenial) - or similar to source a newer compiler. - -2: GCC 6 is not provided on the base platform, users will - need the - [devtoolset-6](https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/) - or later to source a newer compiler. +| Operating System | Architectures | Versions | Support Type | Notes | +| ---------------- | ---------------- | ------------------------------- | ---------------------------------------------------------------- | --------------------------------------------------------------------------- | +| GNU/Linux | x64 | kernel >= 3.10, glibc >= 2.17 | Tier 1 | e.g. Ubuntu 16.04 [1](#fn1), Debian 9, EL 7 [2](#fn2) | +| GNU/Linux | x64 | kernel >= 3.10, musl >= 1.1.19 | Experimental | e.g. Alpine 3.8 | +| GNU/Linux | x86 | kernel >= 3.10, glibc >= 2.17 | Experimental | Downgraded as of Node.js 10 | +| GNU/Linux | arm64 | kernel >= 4.5, glibc >= 2.17 | Tier 1 | e.g. 
Ubuntu 16.04, Debian 9, EL 7 [3](#fn3) | +| GNU/Linux | armv7 | kernel >= 4.14, glibc >= 2.24 | Tier 1 | e.g. Ubuntu 18.04, Debian 9 | +| GNU/Linux | armv6 | kernel >= 4.14, glibc >= 2.24 | Experimental | Downgraded as of Node.js 12 | +| GNU/Linux | ppc64le >=power8 | kernel >= 3.10.0, glibc >= 2.17 | Tier 2 | e.g. Ubuntu 16.04 [1](#fn1), EL 7 [2](#fn2) | +| GNU/Linux | s390x | kernel >= 3.10.0, glibc >= 2.17 | Tier 2 | e.g. EL 7 [2](#fn2) | +| Windows | x64, x86 (WoW64) | >= Windows 7/2008 R2/2012 R2 | Tier 1 | [4](#fn4),[5](#fn5) | +| Windows | x86 (native) | >= Windows 7/2008 R2/2012 R2 | Tier 1 (running) / Experimental (compiling) [6](#fn6) | | +| Windows | arm64 | >= Windows 10 | Experimental | | +| macOS | x64 | >= 10.11 | Tier 1 | | +| SmartOS | x64 | >= 18 | Tier 2 | | +| AIX | ppc64be >=power7 | >= 7.2 TL02 | Tier 2 | | +| FreeBSD | x64 | >= 11 | Experimental | Downgraded as of Node.js 12 | + +1: GCC 6 is not provided on the base platform, users will need +the +[Toolchain test builds PPA](https://launchpad.net/~ubuntu-toolchain-r/+archive/ubuntu/test?field.series_filter=xenial) +or similar to source a newer compiler. + +2: GCC 6 is not provided on the base platform, users will need +the +[devtoolset-6](https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/) +or later to source a newer compiler. 3: Older kernel versions may work for ARM64, however the - Node.js test infrastructure only tests >= 4.5. +Node.js test infrastructure only tests >= 4.5. 4: On Windows, running Node.js in Windows terminal emulators - like `mintty` requires the usage of [winpty](https://github.com/rprichard/winpty) - for the tty channels to work correctly (e.g. `winpty node.exe script.js`). - In "Git bash" if you call the node shell alias (`node` without the `.exe` - extension), `winpty` is used automatically. +like `mintty` requires the usage of +[winpty](https://github.com/rprichard/winpty) for the tty channels to work +correctly (e.g. `winpty node.exe script.js`). 
In "Git bash" if you call the node +shell alias (`node` without the `.exe` extension), `winpty` is used +automatically. 5: The Windows Subsystem for Linux (WSL) is not directly - supported, but the GNU/Linux build process and binaries should work. The - community will only address issues that reproduce on native GNU/Linux - systems. Issues that only reproduce on WSL should be reported in the - [WSL issue tracker](https://github.com/Microsoft/WSL/issues). Running the - Windows binary (`node.exe`) in WSL is not recommended. It will not work - without workarounds such as stdio redirection. - -6: Running Node.js on x86 Windows should work and binaries -are provided. However, tests in our infrastructure only run on WoW64. -Furthermore, compiling on x86 Windows is currently considered Experimental and -may not be possible. +supported, but the GNU/Linux build process and binaries should work. The +community will only address issues that reproduce on native GNU/Linux systems. +Issues that only reproduce on WSL should be reported in the +[WSL issue tracker](https://github.com/Microsoft/WSL/issues). Running the +Windows binary (`node.exe`) in WSL is not recommended. It will not work without +workarounds such as stdio redirection. + +6: Running Node.js on x86 Windows should work and binaries are +provided. However, tests in our infrastructure only run on WoW64. Furthermore, +compiling on x86 Windows is currently considered Experimental and may not be +possible. ### Supported toolchains @@ -165,59 +166,59 @@ Depending on the host platform, the selection of toolchains may vary. 
Binaries at are produced on: -| Binary package | Platform and Toolchain | -| --------------------- | ------------------------------------------------------------------------ | -| aix-ppc64 | AIX 7.1 TL05 on PPC64BE with GCC 6 | -| darwin-x64 (and .pkg) | macOS 10.11, Xcode Command Line Tools 10 with -mmacosx-version-min=10.10 | -| linux-arm64 | CentOS 7 with devtoolset-6 / GCC 6 | -| linux-armv7l | Cross-compiled on Ubuntu 16.04 x64 with [custom GCC toolchain](https://github.com/rvagg/rpi-newer-crosstools) | -| linux-ppc64le | CentOS 7 with devtoolset-6 / GCC 6 [7](#fn7) | -| linux-s390x | RHEL 7 with devtoolset-6 / GCC 6 [7](#fn7) | -| linux-x64 | CentOS 7 with devtoolset-6 / GCC 6 [7](#fn7) | -| sunos-x64 | SmartOS 18 with GCC 7 | -| win-x64 and win-x86 | Windows 2012 R2 (x64) with Visual Studio 2017 | +| Binary package | Platform and Toolchain | +| --------------------- | ------------------------------------------------------------------------------------------------------------- | +| aix-ppc64 | AIX 7.1 TL05 on PPC64BE with GCC 6 | +| darwin-x64 (and .pkg) | macOS 10.11, Xcode Command Line Tools 10 with -mmacosx-version-min=10.10 | +| linux-arm64 | CentOS 7 with devtoolset-6 / GCC 6 | +| linux-armv7l | Cross-compiled on Ubuntu 16.04 x64 with [custom GCC toolchain](https://github.com/rvagg/rpi-newer-crosstools) | +| linux-ppc64le | CentOS 7 with devtoolset-6 / GCC 6 [7](#fn7) | +| linux-s390x | RHEL 7 with devtoolset-6 / GCC 6 [7](#fn7) | +| linux-x64 | CentOS 7 with devtoolset-6 / GCC 6 [7](#fn7) | +| sunos-x64 | SmartOS 18 with GCC 7 | +| win-x64 and win-x86 | Windows 2012 R2 (x64) with Visual Studio 2017 | 7: The Enterprise Linux devtoolset-6 allows us to compile binaries with GCC 6 but linked to the glibc and libstdc++ versions of the host -platforms (CentOS 7 / RHEL 7). Therefore, binaries produced on these systems -are compatible with glibc >= 2.17 and libstdc++ >= 6.0.20 (`GLIBCXX_3.4.20`). 
-These are available on distributions natively supporting GCC 4.9, such as -Ubuntu 14.04 and Debian 8. +platforms (CentOS 7 / RHEL 7). Therefore, binaries produced on these systems are +compatible with glibc >= 2.17 and libstdc++ >= 6.0.20 (`GLIBCXX_3.4.20`). These +are available on distributions natively supporting GCC 4.9, such as Ubuntu 14.04 +and Debian 8. #### OpenSSL asm support -OpenSSL-1.1.1 requires the following assembler version for use of asm -support on x86_64 and ia32. +OpenSSL-1.1.1 requires the following assembler version for use of asm support on +x86_64 and ia32. For use of AVX-512, -* gas (GNU assembler) version 2.26 or higher -* nasm version 2.11.8 or higher in Windows +- gas (GNU assembler) version 2.26 or higher +- nasm version 2.11.8 or higher in Windows AVX-512 is disabled for Skylake-X by OpenSSL-1.1.1. For use of AVX2, -* gas (GNU assembler) version 2.23 or higher -* Xcode version 5.0 or higher -* llvm version 3.3 or higher -* nasm version 2.10 or higher in Windows +- gas (GNU assembler) version 2.23 or higher +- Xcode version 5.0 or higher +- llvm version 3.3 or higher +- nasm version 2.10 or higher in Windows -Please refer to - https://www.openssl.org/docs/man1.1.1/man3/OPENSSL_ia32cap.html for details. +Please refer to https://www.openssl.org/docs/man1.1.1/man3/OPENSSL_ia32cap.html +for details. - If compiling without one of the above, use `configure` with the +If compiling without one of the above, use `configure` with the `--openssl-no-asm` flag. Otherwise, `configure` will fail. ### Previous versions of this document Supported platforms and toolchains change with each major version of Node.js. -This document is only valid for the current major version of Node.js. -Consult previous versions of this document for older versions of Node.js: +This document is only valid for the current major version of Node.js. 
Consult +previous versions of this document for older versions of Node.js: -* [Node.js 10](https://github.com/nodejs/node/blob/v10.x/BUILDING.md) -* [Node.js 8](https://github.com/nodejs/node/blob/v8.x/BUILDING.md) -* [Node.js 6](https://github.com/nodejs/node/blob/v6.x/BUILDING.md) +- [Node.js 10](https://github.com/nodejs/node/blob/v10.x/BUILDING.md) +- [Node.js 8](https://github.com/nodejs/node/blob/v8.x/BUILDING.md) +- [Node.js 6](https://github.com/nodejs/node/blob/v6.x/BUILDING.md) ## Building Node.js on supported platforms @@ -227,46 +228,46 @@ The Node.js project uses Python as part of its build process and has historically only been Python 2 compatible. Python 2 will reach its _end-of-life_ at the end of 2019 at which point the -interpreter will cease receiving updates. See https://python3statement.org/ -for more information. +interpreter will cease receiving updates. See https://python3statement.org/ for +more information. -The Node.js project is in the process of transitioning its Python code to -Python 3 compatibility. Installing both versions of Python while building -and testing Node.js allows developers and end users to test, benchmark, -and debug Node.js running on both versions to ensure a smooth and complete -transition before the year-end deadline. +The Node.js project is in the process of transitioning its Python code to Python +3 compatibility. Installing both versions of Python while building and testing +Node.js allows developers and end users to test, benchmark, and debug Node.js +running on both versions to ensure a smooth and complete transition before the +year-end deadline. ### Unix and macOS #### Unix prerequisites -* `gcc` and `g++` >= 6.3 or newer, or -* GNU Make 3.81 or newer -* Python (see note above) - * Python 2.7 - * Python 3.5, 3.6, and 3.7 are experimental. +- `gcc` and `g++` >= 6.3 or newer, or +- GNU Make 3.81 or newer +- Python (see note above) + - Python 2.7 + - Python 3.5, 3.6, and 3.7 are experimental. 
Installation via Linux package manager can be achieved with: -* Ubuntu, Debian: `sudo apt-get install python g++ make` -* Fedora: `sudo dnf install python gcc-c++ make` -* CentOS and RHEL: `sudo yum install python gcc-c++ make` -* OpenSUSE: `sudo zypper install python gcc-c++ make` +- Ubuntu, Debian: `sudo apt-get install python g++ make` +- Fedora: `sudo dnf install python gcc-c++ make` +- CentOS and RHEL: `sudo yum install python gcc-c++ make` +- OpenSUSE: `sudo zypper install python gcc-c++ make` FreeBSD and OpenBSD users may also need to install `libexecinfo`. #### macOS prerequisites -* Xcode Command Line Tools >= 10 for macOS -* Python (see note above) - * Python 2.7 - * Python 3.5, 3.6, and 3.7 are experimental. +- Xcode Command Line Tools >= 10 for macOS +- Python (see note above) + - Python 2.7 + - Python 3.5, 3.6, and 3.7 are experimental. macOS users can install the `Xcode Command Line Tools` by running `xcode-select --install`. Alternatively, if you already have the full Xcode -installed, you can find them under the menu `Xcode -> Open Developer Tool -> -More Developer Tools...`. This step will install `clang`, `clang++`, and -`make`. +installed, you can find them under the menu +`Xcode -> Open Developer Tool -> More Developer Tools...`. This step will +install `clang`, `clang++`, and `make`. #### Building Node.js @@ -284,8 +285,8 @@ The `-j4` option will cause `make` to run 4 simultaneous compilation jobs which may reduce build time. For more information, see the [GNU Make Documentation](https://www.gnu.org/software/make/manual/html_node/Parallel.html). -The above requires that `python` resolves to a supported version of -Python. See [Prerequisites](#prerequisites). +The above requires that `python` resolves to a supported version of Python. See +[Prerequisites](#prerequisites). After building, setting up [firewall rules](tools/macos-firewall.sh) can avoid popups asking to accept incoming network connections when running tests. 
@@ -324,22 +325,22 @@ do not submit patches that fail either check. If you want to run the linter without running tests, use `make lint`/`vcbuild lint`. It will lint JavaScript, C++, and Markdown files. -If you are updating tests and want to run tests in a single test file -(e.g. `test/parallel/test-stream2-transform.js`): +If you are updating tests and want to run tests in a single test file (e.g. +`test/parallel/test-stream2-transform.js`): ```text $ python tools/test.py test/parallel/test-stream2-transform.js ``` -You can execute the entire suite of tests for a given subsystem -by providing the name of a subsystem: +You can execute the entire suite of tests for a given subsystem by providing the +name of a subsystem: ```text $ python tools/test.py -J --mode=release child-process ``` -If you want to check the other options, please refer to the help by using -the `--help` option: +If you want to check the other options, please refer to the help by using the +`--help` option: ```text $ python tools/test.py --help @@ -355,10 +356,10 @@ Remember to recompile with `make -j4` in between test runs if you change code in the `lib` or `src` directories. The tests attempt to detect support for IPv6 and exclude IPv6 tests if -appropriate. If your main interface has IPv6 addresses, then your -loopback interface must also have '::1' enabled. For some default installations -on Ubuntu that does not seem to be the case. To enable '::1' on the -loopback interface on Ubuntu: +appropriate. If your main interface has IPv6 addresses, then your loopback +interface must also have '::1' enabled. For some default installations on Ubuntu +that does not seem to be the case. To enable '::1' on the loopback interface on +Ubuntu: ```bash sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 @@ -366,8 +367,8 @@ sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 #### Running Coverage -It's good practice to ensure any code you add or change is covered by tests. 
-You can do so by running the test suite with coverage enabled: +It's good practice to ensure any code you add or change is covered by tests. You +can do so by running the test suite with coverage enabled: ```console $ ./configure --coverage @@ -375,9 +376,9 @@ $ make coverage ``` A detailed coverage report will be written to `coverage/index.html` for -JavaScript coverage and to `coverage/cxxcoverage.html` for C++ coverage -(if you only want to run the JavaScript tests then you do not need to run -the first command `./configure --coverage`). +JavaScript coverage and to `coverage/cxxcoverage.html` for C++ coverage (if you +only want to run the JavaScript tests then you do not need to run the first +command `./configure --coverage`). _Generating a test coverage report can take several minutes._ @@ -389,8 +390,8 @@ isolation, unset the opposing `_SUITES` variable): $ CI_JS_SUITES=child-process CI_NATIVE_SUITES= make coverage ``` -The above command executes tests for the `child-process` subsystem and -outputs the resulting coverage report. +The above command executes tests for the `child-process` subsystem and outputs +the resulting coverage report. 
Alternatively, you can run `make coverage-run-js`, to execute JavaScript tests independently of the C++ test suite: @@ -428,8 +429,8 @@ To read the documentation: $ man doc/node.1 ``` -If you prefer to read the documentation in a browser, -run the following after `make doc` is finished: +If you prefer to read the documentation in a browser, run the following after +`make doc` is finished: ```console $ make docopen @@ -451,9 +452,9 @@ $ [sudo] make install #### Building a debug build -If you run into an issue where the information provided by the JS stack trace -is not enough, or if you suspect the error happens outside of the JS VM, you -can try to build a debug enabled binary: +If you run into an issue where the information provided by the JS stack trace is +not enough, or if you suspect the error happens outside of the JS VM, you can +try to build a debug enabled binary: ```console $ ./configure --debug @@ -467,14 +468,14 @@ release version is actually installed when you run `make install`. To use the debug build with all the normal dependencies overwrite the release version in the install directory: -``` console +```console $ make install --prefix=/opt/node-debug/ $ cp -a -f out/Debug/node /opt/node-debug/node ``` When using the debug binary, core dumps will be generated in case of crashes. -These core dumps are useful for debugging when provided with the -corresponding original debug binary and system information. +These core dumps are useful for debugging when provided with the corresponding +original debug binary and system information. Reading the core dump requires `gdb` built on the same platform the core dump was captured on (i.e. 64-bit `gdb` for `node` built on a 64-bit system, Linux @@ -483,7 +484,7 @@ was captured on (i.e. 
64-bit `gdb` for `node` built on a 64-bit system, Linux Example of generating a backtrace from the core dump: -``` console +```console $ gdb /opt/node-debug/node core.node.8.1535359906 $ backtrace ``` @@ -494,37 +495,38 @@ $ backtrace ##### Option 1: Manual install -* [Python 2.7](https://www.python.org/downloads/) -* The "Desktop development with C++" workload from - [Visual Studio 2017](https://www.visualstudio.com/downloads/) or the - "Visual C++ build tools" workload from the +- [Python 2.7](https://www.python.org/downloads/) +- The "Desktop development with C++" workload from + [Visual Studio 2017](https://www.visualstudio.com/downloads/) or the "Visual + C++ build tools" workload from the [Build Tools](https://www.visualstudio.com/downloads/#build-tools-for-visual-studio-2017), with the default optional components. -* Basic Unix tools required for some tests, - [Git for Windows](https://git-scm.com/download/win) includes Git Bash - and tools which can be included in the global `PATH`. -* The [NetWide Assembler](https://www.nasm.us/), for OpenSSL assembler modules. - If not installed in the default location, it needs to be manually added - to `PATH`. A build with the `openssl-no-asm` option does not need this, nor - does a build targeting ARM64 Windows. +- Basic Unix tools required for some tests, + [Git for Windows](https://git-scm.com/download/win) includes Git Bash and + tools which can be included in the global `PATH`. +- The [NetWide Assembler](https://www.nasm.us/), for OpenSSL assembler modules. + If not installed in the default location, it needs to be manually added to + `PATH`. A build with the `openssl-no-asm` option does not need this, nor does + a build targeting ARM64 Windows. 
Optional requirements to build the MSI installer package: -* The [WiX Toolset v3.11](https://wixtoolset.org/releases/) and the +- The [WiX Toolset v3.11](https://wixtoolset.org/releases/) and the [Wix Toolset Visual Studio 2017 Extension](https://marketplace.visualstudio.com/items?itemName=RobMensching.WixToolsetVisualStudio2017Extension). Optional requirements for compiling for Windows 10 on ARM (ARM64): -* ARM64 Windows build machine - * Due to a GYP limitation, this is required to run compiled code - generation tools (like V8's builtins and mksnapshot tools) -* Visual Studio 15.9.0 or newer -* Visual Studio optional components - * Visual C++ compilers and libraries for ARM64 - * Visual C++ ATL for ARM64 -* Windows 10 SDK 10.0.17763.0 or newer +- ARM64 Windows build machine + - Due to a GYP limitation, this is required to run compiled code generation + tools (like V8's builtins and mksnapshot tools) +- Visual Studio 15.9.0 or newer +- Visual Studio optional components + - Visual C++ compilers and libraries for ARM64 + - Visual C++ ATL for ARM64 +- Windows 10 SDK 10.0.17763.0 or newer ##### Option 2: Automated install with Boxstarter + A [Boxstarter](https://boxstarter.org/) script can be used for easy setup of @@ -532,13 +534,14 @@ Windows systems with all the required prerequisites for Node.js development. This script will install the following [Chocolatey](https://chocolatey.org/) packages: -* [Git for Windows](https://chocolatey.org/packages/git) with the `git` and - Unix tools added to the `PATH`. -* [Python 3.x](https://chocolatey.org/packages/python) and +- [Git for Windows](https://chocolatey.org/packages/git) with the `git` and Unix + tools added to the `PATH`. 
+- [Python 3.x](https://chocolatey.org/packages/python) and [legacy Python](https://chocolatey.org/packages/python2) -* [Visual Studio 2017 Build Tools](https://chocolatey.org/packages/visualstudio2017buildtools) - with [Visual C++ workload](https://chocolatey.org/packages/visualstudio2017-workload-vctools) -* [NetWide Assembler](https://chocolatey.org/packages/nasm) +- [Visual Studio 2017 Build Tools](https://chocolatey.org/packages/visualstudio2017buildtools) + with + [Visual C++ workload](https://chocolatey.org/packages/visualstudio2017-workload-vctools) +- [NetWide Assembler](https://chocolatey.org/packages/nasm) To install Node.js prerequisites using [Boxstarter WebLauncher](https://boxstarter.org/WebLauncher), open @@ -602,16 +605,15 @@ enabled by default, with English data only. ### Default: `small-icu` (English only) support -By default, only English data is included, but -the full `Intl` (ECMA-402) APIs. It does not need to download -any dependencies to function. You can add full -data at runtime. +By default, only English data is included, but the full `Intl` (ECMA-402) APIs. +It does not need to download any dependencies to function. You can add full data +at runtime. ### Build with full ICU support (all locales supported by ICU) -With the `--download=all`, this may download ICU if you don't have an -ICU in `deps/icu`. (The embedded `small-icu` included in the default -Node.js source does not include all locales.) +With the `--download=all`, this may download ICU if you don't have an ICU in +`deps/icu`. (The embedded `small-icu` included in the default Node.js source +does not include all locales.) #### Unix/macOS @@ -648,19 +650,18 @@ $ ./configure --without-intl $ pkg-config --modversion icu-i18n && ./configure --with-intl=system-icu ``` -If you are cross-compiling, your `pkg-config` must be able to supply a path -that works for both your host and target environments. 
+If you are cross-compiling, your `pkg-config` must be able to supply a path that +works for both your host and target environments. ### Build with a specific ICU You can find other ICU releases at -[the ICU homepage](http://icu-project.org/download). -Download the file named something like `icu4c-**##.#**-src.tgz` (or -`.zip`). +[the ICU homepage](http://icu-project.org/download). Download the file named +something like `icu4c-**##.#**-src.tgz` (or `.zip`). -To check the minimum recommended ICU, run `./configure --help` and see -the help for the `--with-icu-source` option. A warning will be printed -during configuration if the ICU version is too old. +To check the minimum recommended ICU, run `./configure --help` and see the help +for the `--with-icu-source` option. A warning will be printed during +configuration if the ICU version is too old. #### Unix/macOS @@ -685,8 +686,8 @@ $ ./configure --with-intl=full-icu --with-icu-source=http://url/to/icu.tgz #### Windows First unpack latest ICU to `deps/icu` -[icu4c-**##.#**-src.tgz](http://icu-project.org/download) (or `.zip`) -as `deps/icu` (You'll have: `deps/icu/source/...`) +[icu4c-**##.#**-src.tgz](http://icu-project.org/download) (or `.zip`) as +`deps/icu` (You'll have: `deps/icu/source/...`) ```console > .\vcbuild full-icu @@ -698,8 +699,8 @@ The current version of Node.js does not support FIPS. ## Building Node.js with external core modules -It is possible to specify one or more JavaScript text files to be bundled in -the binary as built-in modules when building Node.js. +It is possible to specify one or more JavaScript text files to be bundled in the +binary as built-in modules when building Node.js. 
### Unix/macOS @@ -713,8 +714,8 @@ $ ./configure --link-module '/root/myModule.js' --link-module './myModule2.js' ### Windows -To make `./myModule.js` available via `require('myModule')` and -`./myModule2.js` available via `require('myModule2')`: +To make `./myModule.js` available via `require('myModule')` and `./myModule2.js` +available via `require('myModule2')`: ```console > .\vcbuild link-module './myModule.js' link-module './myModule2.js' @@ -722,14 +723,14 @@ To make `./myModule.js` available via `require('myModule')` and ## Note for downstream distributors of Node.js -The Node.js ecosystem is reliant on ABI compatibility within a major release. -To maintain ABI compatibility it is required that distributed builds of Node.js -be built against the same version of dependencies, or similar versions that do -not break their ABI compatibility, as those released by Node.js for any given +The Node.js ecosystem is reliant on ABI compatibility within a major release. To +maintain ABI compatibility it is required that distributed builds of Node.js be +built against the same version of dependencies, or similar versions that do not +break their ABI compatibility, as those released by Node.js for any given `NODE_MODULE_VERSION` (located in `src/node_version.h`). -When Node.js is built (with an intention to distribute) with an ABI -incompatible with the official Node.js builds (e.g. using a ABI incompatible -version of a dependency), please reserve and use a custom `NODE_MODULE_VERSION` -by opening a pull request against the registry available at +When Node.js is built (with an intention to distribute) with an ABI incompatible +with the official Node.js builds (e.g. using a ABI incompatible version of a +dependency), please reserve and use a custom `NODE_MODULE_VERSION` by opening a +pull request against the registry available at . 
diff --git a/benchmark/process/bench-env.js b/benchmark/process/bench-env.js index 5df521cc958389..25d5c2a2843698 100644 --- a/benchmark/process/bench-env.js +++ b/benchmark/process/bench-env.js @@ -1,37 +1,34 @@ -'use strict'; +"use strict"; -const common = require('../common'); +const common = require("../common"); const bench = common.createBenchmark(main, { n: [1e6], - operation: ['get', 'set', 'enumerate', 'query', 'delete'] + operation: ["get", "set", "enumerate", "query", "delete"] }); - function main({ n, operation }) { switch (operation) { - case 'get': + case "get": bench.start(); for (let i = 0; i < n; i++) { process.env.PATH; } bench.end(n); break; - case 'set': + case "set": bench.start(); for (let i = 0; i < n; i++) { - process.env.DUMMY = 'hello, world'; + process.env.DUMMY = "hello, world"; } bench.end(n); break; - case 'enumerate': + case "enumerate": // First, normalize process.env so that benchmark results are comparable. - for (const key of Object.keys(process.env)) - delete process.env[key]; - for (let i = 0; i < 64; i++) - process.env[Math.random()] = Math.random(); + for (const key of Object.keys(process.env)) delete process.env[key]; + for (let i = 0; i < 64; i++) process.env[Math.random()] = Math.random(); - n /= 10; // Enumeration is comparatively heavy. + n /= 10; // Enumeration is comparatively heavy. bench.start(); for (let i = 0; i < n; i++) { // Access every item in object to process values. 
@@ -39,14 +36,14 @@ function main({ n, operation }) { } bench.end(n); break; - case 'query': + case "query": bench.start(); for (let i = 0; i < n; i++) { - 'PATH' in process.env; + "PATH" in process.env; } bench.end(n); break; - case 'delete': + case "delete": bench.start(); for (let i = 0; i < n; i++) { delete process.env.DUMMY; diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 47abd027490679..3da4073dbd37bd 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -190,189 +190,201 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler, public TorqueGeneratedExportedMacrosAssembler { - public: - using Node = compiler::Node; - template - using TNode = compiler::TNode; - template - using SloppyTNode = compiler::SloppyTNode; - - template - using LazyNode = std::function()>; - - explicit CodeStubAssembler(compiler::CodeAssemblerState* state); - - enum AllocationFlag : uint8_t { - kNone = 0, - kDoubleAlignment = 1, - kPretenured = 1 << 1, - kAllowLargeObjectAllocation = 1 << 2, - }; - - enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking }; - - using AllocationFlags = base::Flags; - - enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS }; - - // On 32-bit platforms, there is a slight performance advantage to doing all - // of the array offset/index arithmetic with SMIs, since it's possible - // to save a few tag/untag operations without paying an extra expense when - // calculating array offset (the smi math can be folded away) and there are - // fewer live ranges. Thus only convert indices to untagged value on 64-bit - // platforms. - ParameterMode OptimalParameterMode() const { - return Is64() ? 
INTPTR_PARAMETERS : SMI_PARAMETERS; - } +public: + using Node = compiler::Node; + template + using TNode = compiler::TNode; + template + using SloppyTNode = compiler::SloppyTNode; + + template + using LazyNode = std::function()>; + + explicit CodeStubAssembler(compiler::CodeAssemblerState* state); + + enum AllocationFlag : uint8_t { + kNone = 0, + kDoubleAlignment = 1, + kPretenured = 1 << 1, + kAllowLargeObjectAllocation = 1 << 2, + }; + + enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking }; + + using AllocationFlags = base::Flags; + + enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS }; + + // On 32-bit platforms, there is a slight performance advantage to doing all + // of the array offset/index arithmetic with SMIs, since it's possible + // to save a few tag/untag operations without paying an extra expense when + // calculating array offset (the smi math can be folded away) and there are + // fewer live ranges. Thus only convert indices to untagged value on 64-bit + // platforms. + ParameterMode OptimalParameterMode() const { + return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS; + } - MachineRepresentation ParameterRepresentation(ParameterMode mode) const { - return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation() - : MachineRepresentation::kTaggedSigned; - } + MachineRepresentation ParameterRepresentation(ParameterMode mode) const { + return mode == INTPTR_PARAMETERS ? 
MachineType::PointerRepresentation() + : MachineRepresentation::kTaggedSigned; + } - MachineRepresentation OptimalParameterRepresentation() const { - return ParameterRepresentation(OptimalParameterMode()); - } + MachineRepresentation OptimalParameterRepresentation() const { + return ParameterRepresentation(OptimalParameterMode()); + } - TNode ParameterToIntPtr(Node* value, ParameterMode mode) { - if (mode == SMI_PARAMETERS) value = SmiUntag(value); - return UncheckedCast(value); - } + TNode ParameterToIntPtr(Node* value, ParameterMode mode) { + if (mode == SMI_PARAMETERS) value = SmiUntag(value); + return UncheckedCast(value); + } - Node* IntPtrToParameter(SloppyTNode value, ParameterMode mode) { - if (mode == SMI_PARAMETERS) return SmiTag(value); - return value; - } + Node* IntPtrToParameter(SloppyTNode value, ParameterMode mode) { + if (mode == SMI_PARAMETERS) return SmiTag(value); + return value; + } - Node* Int32ToParameter(SloppyTNode value, ParameterMode mode) { - return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode); - } + Node* Int32ToParameter(SloppyTNode value, ParameterMode mode) { + return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode); + } - TNode ParameterToTagged(Node* value, ParameterMode mode) { - if (mode != SMI_PARAMETERS) return SmiTag(value); - return UncheckedCast(value); - } + TNode ParameterToTagged(Node* value, ParameterMode mode) { + if (mode != SMI_PARAMETERS) return SmiTag(value); + return UncheckedCast(value); + } - Node* TaggedToParameter(SloppyTNode value, ParameterMode mode) { - if (mode != SMI_PARAMETERS) return SmiUntag(value); - return value; - } + Node* TaggedToParameter(SloppyTNode value, ParameterMode mode) { + if (mode != SMI_PARAMETERS) return SmiUntag(value); + return value; + } - bool ToParameterConstant(Node* node, intptr_t* out, ParameterMode mode) { - if (mode == ParameterMode::SMI_PARAMETERS) { - Smi constant; - if (ToSmiConstant(node, &constant)) { - *out = static_cast(constant.value()); - return true; - } - 
} else { - DCHECK_EQ(mode, ParameterMode::INTPTR_PARAMETERS); - intptr_t constant; - if (ToIntPtrConstant(node, constant)) { - *out = constant; - return true; - } - } - - return false; - } + bool ToParameterConstant(Node* node, intptr_t* out, ParameterMode mode) { + if (mode == ParameterMode::SMI_PARAMETERS) { + Smi constant; + if (ToSmiConstant(node, &constant)) { + *out = static_cast(constant.value()); + return true; + } + } else { + DCHECK_EQ(mode, ParameterMode::INTPTR_PARAMETERS); + intptr_t constant; + if (ToIntPtrConstant(node, constant)) { + *out = constant; + return true; + } + } + + return false; + } #if defined(V8_HOST_ARCH_32_BIT) - TNode BIntToSmi(TNode source) { return source; } - TNode BIntToIntPtr(TNode source) { - return SmiToIntPtr(source); - } - TNode SmiToBInt(TNode source) { return source; } - TNode IntPtrToBInt(TNode source) { - return SmiFromIntPtr(source); - } + TNode BIntToSmi(TNode source) { + return source; + } + TNode BIntToIntPtr(TNode source) { + return SmiToIntPtr(source); + } + TNode SmiToBInt(TNode source) { + return source; + } + TNode IntPtrToBInt(TNode source) { + return SmiFromIntPtr(source); + } #elif defined(V8_HOST_ARCH_64_BIT) - TNode BIntToSmi(TNode source) { return SmiFromIntPtr(source); } - TNode BIntToIntPtr(TNode source) { return source; } - TNode SmiToBInt(TNode source) { return SmiToIntPtr(source); } - TNode IntPtrToBInt(TNode source) { return source; } + TNode BIntToSmi(TNode source) { + return SmiFromIntPtr(source); + } + TNode BIntToIntPtr(TNode source) { + return source; + } + TNode SmiToBInt(TNode source) { + return SmiToIntPtr(source); + } + TNode IntPtrToBInt(TNode source) { + return source; + } #else #error Unknown architecture. 
#endif - TNode TaggedToSmi(TNode value, Label* fail) { - GotoIf(TaggedIsNotSmi(value), fail); - return UncheckedCast(value); - } + TNode TaggedToSmi(TNode value, Label* fail) { + GotoIf(TaggedIsNotSmi(value), fail); + return UncheckedCast(value); + } - TNode TaggedToPositiveSmi(TNode value, Label* fail) { - GotoIfNot(TaggedIsPositiveSmi(value), fail); - return UncheckedCast(value); - } + TNode TaggedToPositiveSmi(TNode value, Label* fail) { + GotoIfNot(TaggedIsPositiveSmi(value), fail); + return UncheckedCast(value); + } - TNode TaggedToDirectString(TNode value, Label* fail); + TNode TaggedToDirectString(TNode value, Label* fail); - TNode TaggedToNumber(TNode value, Label* fail) { - GotoIfNot(IsNumber(value), fail); - return UncheckedCast(value); - } + TNode TaggedToNumber(TNode value, Label* fail) { + GotoIfNot(IsNumber(value), fail); + return UncheckedCast(value); + } - TNode TaggedToHeapObject(TNode value, Label* fail) { - GotoIf(TaggedIsSmi(value), fail); - return UncheckedCast(value); - } + TNode TaggedToHeapObject(TNode value, Label* fail) { + GotoIf(TaggedIsSmi(value), fail); + return UncheckedCast(value); + } - TNode HeapObjectToJSArray(TNode heap_object, - Label* fail) { - GotoIfNot(IsJSArray(heap_object), fail); - return UncheckedCast(heap_object); - } + TNode HeapObjectToJSArray(TNode heap_object, + Label* fail) { + GotoIfNot(IsJSArray(heap_object), fail); + return UncheckedCast(heap_object); + } - TNode HeapObjectToJSArrayBuffer(TNode heap_object, - Label* fail) { - GotoIfNot(IsJSArrayBuffer(heap_object), fail); - return UncheckedCast(heap_object); - } + TNode HeapObjectToJSArrayBuffer(TNode heap_object, + Label* fail) { + GotoIfNot(IsJSArrayBuffer(heap_object), fail); + return UncheckedCast(heap_object); + } - TNode TaggedToFastJSArray(TNode context, - TNode value, Label* fail) { - GotoIf(TaggedIsSmi(value), fail); - TNode heap_object = CAST(value); - GotoIfNot(IsFastJSArray(heap_object, context), fail); - return UncheckedCast(heap_object); - } + TNode 
TaggedToFastJSArray(TNode context, + TNode value, Label* fail) { + GotoIf(TaggedIsSmi(value), fail); + TNode heap_object = CAST(value); + GotoIfNot(IsFastJSArray(heap_object, context), fail); + return UncheckedCast(heap_object); + } - TNode HeapObjectToJSDataView(TNode heap_object, - Label* fail) { - GotoIfNot(IsJSDataView(heap_object), fail); - return CAST(heap_object); - } + TNode HeapObjectToJSDataView(TNode heap_object, + Label* fail) { + GotoIfNot(IsJSDataView(heap_object), fail); + return CAST(heap_object); + } - TNode HeapObjectToJSProxy(TNode heap_object, - Label* fail) { - GotoIfNot(IsJSProxy(heap_object), fail); - return CAST(heap_object); - } + TNode HeapObjectToJSProxy(TNode heap_object, + Label* fail) { + GotoIfNot(IsJSProxy(heap_object), fail); + return CAST(heap_object); + } - TNode HeapObjectToJSStringIterator( - TNode heap_object, Label* fail) { - GotoIfNot(IsJSStringIterator(heap_object), fail); - return CAST(heap_object); - } + TNode HeapObjectToJSStringIterator( + TNode heap_object, Label* fail) { + GotoIfNot(IsJSStringIterator(heap_object), fail); + return CAST(heap_object); + } - TNode HeapObjectToCallable(TNode heap_object, - Label* fail) { - GotoIfNot(IsCallable(heap_object), fail); - return CAST(heap_object); - } + TNode HeapObjectToCallable(TNode heap_object, + Label* fail) { + GotoIfNot(IsCallable(heap_object), fail); + return CAST(heap_object); + } - TNode HeapObjectToString(TNode heap_object, Label* fail) { - GotoIfNot(IsString(heap_object), fail); - return CAST(heap_object); - } + TNode HeapObjectToString(TNode heap_object, Label* fail) { + GotoIfNot(IsString(heap_object), fail); + return CAST(heap_object); + } - TNode HeapObjectToConstructor(TNode heap_object, - Label* fail) { - GotoIfNot(IsConstructor(heap_object), fail); - return CAST(heap_object); - } + TNode HeapObjectToConstructor(TNode heap_object, + Label* fail) { + GotoIfNot(IsConstructor(heap_object), fail); + return CAST(heap_object); + } - Node* MatchesParameterMode(Node* 
value, ParameterMode mode); + Node* MatchesParameterMode(Node* value, ParameterMode mode); #define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ Node* OpName(Node* a, Node* b, ParameterMode mode) { \ @@ -383,97 +395,113 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return IntPtrOpName(a, b); \ } \ } - PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin) - PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd) - PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub) - PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan) - PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual, - SmiLessThanOrEqual) - PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan) - PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual, - SmiGreaterThanOrEqual) - PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow) - PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual, - SmiAboveOrEqual) + PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin) + PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd) + PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub) + PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan) + PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual, + SmiLessThanOrEqual) + PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan) + PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual, + SmiGreaterThanOrEqual) + PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow) + PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual, + SmiAboveOrEqual) #undef PARAMETER_BINOP - uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { return a << b; } - uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { return a >> b; } - intptr_t ConstexprIntPtrAdd(intptr_t a, intptr_t b) { return a + b; } - uintptr_t ConstexprUintPtrAdd(uintptr_t a, uintptr_t b) { return a + b; } - intptr_t ConstexprWordNot(intptr_t a) { return ~a; } - 
uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; } + uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { + return a << b; + } + uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { + return a >> b; + } + intptr_t ConstexprIntPtrAdd(intptr_t a, intptr_t b) { + return a + b; + } + uintptr_t ConstexprUintPtrAdd(uintptr_t a, uintptr_t b) { + return a + b; + } + intptr_t ConstexprWordNot(intptr_t a) { + return ~a; + } + uintptr_t ConstexprWordNot(uintptr_t a) { + return ~a; + } - TNode NoContextConstant(); + TNode NoContextConstant(); #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ compiler::TNode().rootAccessorName())>::type>::type> \ name##Constant(); - HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) + HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ compiler::TNode().rootAccessorName())>::type>::type> \ name##Constant(); - HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) + HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ TNode Is##name(SloppyTNode value); \ TNode IsNot##name(SloppyTNode value); - HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) + HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) #undef HEAP_CONSTANT_TEST - Node* IntPtrOrSmiConstant(int value, ParameterMode mode); - - bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode); - bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value, - ParameterMode mode); - - // Round the 32bits payload of the provided word up to the next power of two. - TNode IntPtrRoundUpToPowerOfTwo32(TNode value); - // Select the maximum of the two provided IntPtr values. - TNode IntPtrMax(SloppyTNode left, - SloppyTNode right); - // Select the minimum of the two provided IntPtr values. 
- TNode IntPtrMin(SloppyTNode left, - SloppyTNode right); - - // Float64 operations. - TNode Float64Ceil(SloppyTNode x); - TNode Float64Floor(SloppyTNode x); - TNode Float64Round(SloppyTNode x); - TNode Float64RoundToEven(SloppyTNode x); - TNode Float64Trunc(SloppyTNode x); - // Select the minimum of the two provided Number values. - TNode NumberMax(SloppyTNode left, SloppyTNode right); - // Select the minimum of the two provided Number values. - TNode NumberMin(SloppyTNode left, SloppyTNode right); - - // After converting an index to an integer, calculate a relative index: if - // index < 0, max(length + index, 0); else min(index, length) - TNode ConvertToRelativeIndex(TNode context, - TNode index, - TNode length); - - // Returns true iff the given value fits into smi range and is >= 0. - TNode IsValidPositiveSmi(TNode value); - - // Tag an IntPtr as a Smi value. - TNode SmiTag(SloppyTNode value); - // Untag a Smi value as an IntPtr. - TNode SmiUntag(SloppyTNode value); - - // Smi conversions. - TNode SmiToFloat64(SloppyTNode value); - TNode SmiFromIntPtr(SloppyTNode value) { return SmiTag(value); } - TNode SmiFromInt32(SloppyTNode value); - TNode SmiToIntPtr(SloppyTNode value) { return SmiUntag(value); } - TNode SmiToInt32(SloppyTNode value); - - // Smi operations. + Node* IntPtrOrSmiConstant(int value, ParameterMode mode); + + bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode); + bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value, + ParameterMode mode); + + // Round the 32bits payload of the provided word up to the next power of two. + TNode IntPtrRoundUpToPowerOfTwo32(TNode value); + // Select the maximum of the two provided IntPtr values. + TNode IntPtrMax(SloppyTNode left, + SloppyTNode right); + // Select the minimum of the two provided IntPtr values. + TNode IntPtrMin(SloppyTNode left, + SloppyTNode right); + + // Float64 operations. 
+ TNode Float64Ceil(SloppyTNode x); + TNode Float64Floor(SloppyTNode x); + TNode Float64Round(SloppyTNode x); + TNode Float64RoundToEven(SloppyTNode x); + TNode Float64Trunc(SloppyTNode x); + // Select the minimum of the two provided Number values. + TNode NumberMax(SloppyTNode left, SloppyTNode right); + // Select the minimum of the two provided Number values. + TNode NumberMin(SloppyTNode left, SloppyTNode right); + + // After converting an index to an integer, calculate a relative index: if + // index < 0, max(length + index, 0); else min(index, length) + TNode ConvertToRelativeIndex(TNode context, + TNode index, + TNode length); + + // Returns true iff the given value fits into smi range and is >= 0. + TNode IsValidPositiveSmi(TNode value); + + // Tag an IntPtr as a Smi value. + TNode SmiTag(SloppyTNode value); + // Untag a Smi value as an IntPtr. + TNode SmiUntag(SloppyTNode value); + + // Smi conversions. + TNode SmiToFloat64(SloppyTNode value); + TNode SmiFromIntPtr(SloppyTNode value) { + return SmiTag(value); + } + TNode SmiFromInt32(SloppyTNode value); + TNode SmiToIntPtr(SloppyTNode value) { + return SmiUntag(value); + } + TNode SmiToInt32(SloppyTNode value); + + // Smi operations. 
#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ TNode SmiOpName(TNode a, TNode b) { \ if (SmiValuesAre32Bits()) { \ @@ -490,56 +518,58 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \ } \ } - SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) - SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) - SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And) - SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or) + SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) + SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) + SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And) + SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or) #undef SMI_ARITHMETIC_BINOP - TNode SmiInc(TNode value) { return SmiAdd(value, SmiConstant(1)); } + TNode SmiInc(TNode value) { + return SmiAdd(value, SmiConstant(1)); + } - TNode TryIntPtrAdd(TNode a, TNode b, + TNode TryIntPtrAdd(TNode a, TNode b, + Label* if_overflow); + TNode TryIntPtrSub(TNode a, TNode b, + Label* if_overflow); + TNode TryInt32Mul(TNode a, TNode b, Label* if_overflow); - TNode TryIntPtrSub(TNode a, TNode b, - Label* if_overflow); - TNode TryInt32Mul(TNode a, TNode b, - Label* if_overflow); - TNode TrySmiAdd(TNode a, TNode b, Label* if_overflow); - TNode TrySmiSub(TNode a, TNode b, Label* if_overflow); - - TNode SmiShl(TNode a, int shift) { - return BitcastWordToTaggedSigned( - WordShl(BitcastTaggedSignedToWord(a), shift)); - } + TNode TrySmiAdd(TNode a, TNode b, Label* if_overflow); + TNode TrySmiSub(TNode a, TNode b, Label* if_overflow); - TNode SmiShr(TNode a, int shift) { - return BitcastWordToTaggedSigned( - WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift), - BitcastTaggedSignedToWord(SmiConstant(-1)))); - } + TNode SmiShl(TNode a, int shift) { + return BitcastWordToTaggedSigned( + WordShl(BitcastTaggedSignedToWord(a), shift)); + } - TNode SmiSar(TNode a, int shift) { - return BitcastWordToTaggedSigned( - WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift), - 
BitcastTaggedSignedToWord(SmiConstant(-1)))); - } + TNode SmiShr(TNode a, int shift) { + return BitcastWordToTaggedSigned( + WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift), + BitcastTaggedSignedToWord(SmiConstant(-1)))); + } - Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiShl(CAST(a), shift); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return WordShl(a, shift); + TNode SmiSar(TNode a, int shift) { + return BitcastWordToTaggedSigned( + WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift), + BitcastTaggedSignedToWord(SmiConstant(-1)))); } - } - Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiShr(CAST(a), shift); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return WordShr(a, shift); + Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) { + if (mode == SMI_PARAMETERS) { + return SmiShl(CAST(a), shift); + } else { + DCHECK_EQ(INTPTR_PARAMETERS, mode); + return WordShl(a, shift); + } + } + + Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) { + if (mode == SMI_PARAMETERS) { + return SmiShr(CAST(a), shift); + } else { + DCHECK_EQ(INTPTR_PARAMETERS, mode); + return WordShr(a, shift); + } } - } #define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ TNode SmiOpName(TNode a, TNode b) { \ @@ -556,3212 +586,3230 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \ } \ } - SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) - SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) - SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan) - SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual, - Uint32GreaterThanOrEqual) - SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan) - SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan) - SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual, - Int32LessThanOrEqual) - 
SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan) - SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual, - Int32GreaterThanOrEqual) + SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) + SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) + SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan) + SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual, + Uint32GreaterThanOrEqual) + SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan) + SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan) + SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual, + Int32LessThanOrEqual) + SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan) + SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual, + Int32GreaterThanOrEqual) #undef SMI_COMPARISON_OP - TNode SmiMax(TNode a, TNode b); - TNode SmiMin(TNode a, TNode b); - // Computes a % b for Smi inputs a and b; result is not necessarily a Smi. - TNode SmiMod(TNode a, TNode b); - // Computes a * b for Smi inputs a and b; result is not necessarily a Smi. - TNode SmiMul(TNode a, TNode b); - // Tries to compute dividend / divisor for Smi inputs; branching to bailout - // if the division needs to be performed as a floating point operation. - TNode TrySmiDiv(TNode dividend, TNode divisor, Label* bailout); - - // Compares two Smis a and b as if they were converted to strings and then - // compared lexicographically. Returns: - // -1 iff x < y. - // 0 iff x == y. - // 1 iff x > y. - TNode SmiLexicographicCompare(TNode x, TNode y); - - // Smi | HeapNumber operations. 
- TNode NumberInc(SloppyTNode value); - TNode NumberDec(SloppyTNode value); - TNode NumberAdd(SloppyTNode a, SloppyTNode b); - TNode NumberSub(SloppyTNode a, SloppyTNode b); - void GotoIfNotNumber(Node* value, Label* is_not_number); - void GotoIfNumber(Node* value, Label* is_number); - TNode SmiToNumber(TNode v) { return v; } - - TNode BitwiseOp(Node* left32, Node* right32, Operation bitwise_op); - - // Allocate an object of the given size. - TNode AllocateInNewSpace(TNode size, - AllocationFlags flags = kNone); - TNode AllocateInNewSpace(int size, AllocationFlags flags = kNone); - TNode Allocate(TNode size, - AllocationFlags flags = kNone); - TNode Allocate(int size, AllocationFlags flags = kNone); - TNode InnerAllocate(TNode previous, int offset); - TNode InnerAllocate(TNode previous, - TNode offset); - - TNode IsRegularHeapObjectSize(TNode size); - - using BranchGenerator = std::function; - using NodeGenerator = std::function; - using ExtraNode = std::pair; - - void Assert(const BranchGenerator& branch, const char* message, - const char* file, int line, - std::initializer_list extra_nodes = {}); - void Assert(const NodeGenerator& condition_body, const char* message, - const char* file, int line, - std::initializer_list extra_nodes = {}); - void Check(const BranchGenerator& branch, const char* message, - const char* file, int line, - std::initializer_list extra_nodes = {}); - void Check(const NodeGenerator& condition_body, const char* message, - const char* file, int line, - std::initializer_list extra_nodes = {}); - void FailAssert(const char* message, const char* file, int line, - std::initializer_list extra_nodes = {}); - - void FastCheck(TNode condition); - - // The following Call wrappers call an object according to the semantics that - // one finds in the EcmaScript spec, operating on an Callable (e.g. a - // JSFunction or proxy) rather than a Code object. - template - TNode Call(TNode context, TNode callable, - TNode receiver, TArgs... 
args) { - return UncheckedCast(CallJS( - CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), - context, callable, receiver, args...)); - } - template - TNode Call(TNode context, TNode callable, - TNode receiver, TArgs... args) { - if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) { - return UncheckedCast(CallJS( - CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), - context, callable, receiver, args...)); - } - return UncheckedCast(CallJS(CodeFactory::Call(isolate()), context, - callable, receiver, args...)); - } + TNode SmiMax(TNode a, TNode b); + TNode SmiMin(TNode a, TNode b); + // Computes a % b for Smi inputs a and b; result is not necessarily a Smi. + TNode SmiMod(TNode a, TNode b); + // Computes a * b for Smi inputs a and b; result is not necessarily a Smi. + TNode SmiMul(TNode a, TNode b); + // Tries to compute dividend / divisor for Smi inputs; branching to bailout + // if the division needs to be performed as a floating point operation. + TNode TrySmiDiv(TNode dividend, TNode divisor, Label* bailout); + + // Compares two Smis a and b as if they were converted to strings and then + // compared lexicographically. Returns: + // -1 iff x < y. + // 0 iff x == y. + // 1 iff x > y. + TNode SmiLexicographicCompare(TNode x, TNode y); + + // Smi | HeapNumber operations. + TNode NumberInc(SloppyTNode value); + TNode NumberDec(SloppyTNode value); + TNode NumberAdd(SloppyTNode a, SloppyTNode b); + TNode NumberSub(SloppyTNode a, SloppyTNode b); + void GotoIfNotNumber(Node* value, Label* is_not_number); + void GotoIfNumber(Node* value, Label* is_number); + TNode SmiToNumber(TNode v) { + return v; + } - template - TNode ConstructWithTarget(TNode context, - TNode target, - TNode new_target, - TArgs... args) { - return CAST(ConstructJSWithTarget(CodeFactory::Construct(isolate()), - context, target, new_target, - implicit_cast>(args)...)); - } - template - TNode Construct(TNode context, - TNode new_target, TArgs... 
args) { - return ConstructWithTarget(context, new_target, new_target, args...); - } + TNode BitwiseOp(Node* left32, Node* right32, Operation bitwise_op); - template - TNode Select(SloppyTNode condition, const F& true_body, - const G& false_body) { - return UncheckedCast(SelectImpl( - condition, - [&]() -> Node* { return implicit_cast>(true_body()); }, - [&]() -> Node* { return implicit_cast>(false_body()); }, - MachineRepresentationOf::value)); - } + // Allocate an object of the given size. + TNode AllocateInNewSpace(TNode size, + AllocationFlags flags = kNone); + TNode AllocateInNewSpace(int size, AllocationFlags flags = kNone); + TNode Allocate(TNode size, + AllocationFlags flags = kNone); + TNode Allocate(int size, AllocationFlags flags = kNone); + TNode InnerAllocate(TNode previous, int offset); + TNode InnerAllocate(TNode previous, + TNode offset); + + TNode IsRegularHeapObjectSize(TNode size); + + using BranchGenerator = std::function; + using NodeGenerator = std::function; + using ExtraNode = std::pair; + + void Assert(const BranchGenerator& branch, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Assert(const NodeGenerator& condition_body, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Check(const BranchGenerator& branch, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Check(const NodeGenerator& condition_body, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void FailAssert(const char* message, const char* file, int line, + std::initializer_list extra_nodes = {}); + + void FastCheck(TNode condition); + + // The following Call wrappers call an object according to the semantics that + // one finds in the EcmaScript spec, operating on an Callable (e.g. a + // JSFunction or proxy) rather than a Code object. 
+ template + TNode Call(TNode context, TNode callable, + TNode receiver, TArgs... args) { + return UncheckedCast(CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, callable, receiver, args...)); + } + template + TNode Call(TNode context, TNode callable, + TNode receiver, TArgs... args) { + if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) { + return UncheckedCast(CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, callable, receiver, args...)); + } + return UncheckedCast(CallJS(CodeFactory::Call(isolate()), context, + callable, receiver, args...)); + } - template - TNode SelectConstant(TNode condition, TNode true_value, - TNode false_value) { - return Select( - condition, [=] { return true_value; }, [=] { return false_value; }); - } + template + TNode ConstructWithTarget(TNode context, + TNode target, + TNode new_target, + TArgs... args) { + return CAST(ConstructJSWithTarget(CodeFactory::Construct(isolate()), + context, target, new_target, + implicit_cast>(args)...)); + } + template + TNode Construct(TNode context, + TNode new_target, TArgs... 
args) { + return ConstructWithTarget(context, new_target, new_target, args...); + } + + template + TNode Select(SloppyTNode condition, const F& true_body, + const G& false_body) { + return UncheckedCast(SelectImpl( + condition, + [&]() -> Node* { return implicit_cast>(true_body()); }, + [&]() -> Node* { return implicit_cast>(false_body()); }, + MachineRepresentationOf::value)); + } + + template + TNode SelectConstant(TNode condition, TNode true_value, + TNode false_value) { + return Select( + condition, [=] { return true_value; }, [=] { return false_value; }); + } - TNode SelectInt32Constant(SloppyTNode condition, - int true_value, int false_value); - TNode SelectIntPtrConstant(SloppyTNode condition, + TNode SelectInt32Constant(SloppyTNode condition, int true_value, int false_value); - TNode SelectBooleanConstant(SloppyTNode condition); - TNode SelectSmiConstant(SloppyTNode condition, Smi true_value, - Smi false_value); - TNode SelectSmiConstant(SloppyTNode condition, int true_value, - Smi false_value) { - return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value); - } - TNode SelectSmiConstant(SloppyTNode condition, Smi true_value, - int false_value) { - return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value)); - } - TNode SelectSmiConstant(SloppyTNode condition, int true_value, - int false_value) { - return SelectSmiConstant(condition, Smi::FromInt(true_value), - Smi::FromInt(false_value)); - } + TNode SelectIntPtrConstant(SloppyTNode condition, + int true_value, int false_value); + TNode SelectBooleanConstant(SloppyTNode condition); + TNode SelectSmiConstant(SloppyTNode condition, Smi true_value, + Smi false_value); + TNode SelectSmiConstant(SloppyTNode condition, int true_value, + Smi false_value) { + return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value); + } + TNode SelectSmiConstant(SloppyTNode condition, Smi true_value, + int false_value) { + return SelectSmiConstant(condition, true_value, 
Smi::FromInt(false_value)); + } + TNode SelectSmiConstant(SloppyTNode condition, int true_value, + int false_value) { + return SelectSmiConstant(condition, Smi::FromInt(true_value), + Smi::FromInt(false_value)); + } - TNode SingleCharacterStringConstant(char const* single_char) { - DCHECK_EQ(strlen(single_char), 1); - return HeapConstant( - isolate()->factory()->LookupSingleCharacterStringFromCode( - single_char[0])); - } + TNode SingleCharacterStringConstant(char const* single_char) { + DCHECK_EQ(strlen(single_char), 1); + return HeapConstant( + isolate()->factory()->LookupSingleCharacterStringFromCode( + single_char[0])); + } - TNode TruncateIntPtrToInt32(SloppyTNode value); + TNode TruncateIntPtrToInt32(SloppyTNode value); - // Check a value for smi-ness - TNode TaggedIsSmi(SloppyTNode a); - TNode TaggedIsSmi(TNode a); - TNode TaggedIsNotSmi(SloppyTNode a); - // Check that the value is a non-negative smi. - TNode TaggedIsPositiveSmi(SloppyTNode a); - // Check that a word has a word-aligned address. - TNode WordIsAligned(SloppyTNode word, size_t alignment); - TNode WordIsPowerOfTwo(SloppyTNode value); + // Check a value for smi-ness + TNode TaggedIsSmi(SloppyTNode a); + TNode TaggedIsSmi(TNode a); + TNode TaggedIsNotSmi(SloppyTNode a); + // Check that the value is a non-negative smi. + TNode TaggedIsPositiveSmi(SloppyTNode a); + // Check that a word has a word-aligned address. + TNode WordIsAligned(SloppyTNode word, size_t alignment); + TNode WordIsPowerOfTwo(SloppyTNode value); #if DEBUG - void Bind(Label* label, AssemblerDebugInfo debug_info); + void Bind(Label* label, AssemblerDebugInfo debug_info); #endif // DEBUG - void Bind(Label* label); + void Bind(Label* label); - template - void Bind(compiler::CodeAssemblerParameterizedLabel* label, - TNode*... phis) { - CodeAssembler::Bind(label, phis...); - } + template + void Bind(compiler::CodeAssemblerParameterizedLabel* label, + TNode*... 
phis) { + CodeAssembler::Bind(label, phis...); + } - void BranchIfSmiEqual(TNode a, TNode b, Label* if_true, - Label* if_false) { - Branch(SmiEqual(a, b), if_true, if_false); - } + void BranchIfSmiEqual(TNode a, TNode b, Label* if_true, + Label* if_false) { + Branch(SmiEqual(a, b), if_true, if_false); + } - void BranchIfSmiLessThan(TNode a, TNode b, Label* if_true, - Label* if_false) { - Branch(SmiLessThan(a, b), if_true, if_false); - } + void BranchIfSmiLessThan(TNode a, TNode b, Label* if_true, + Label* if_false) { + Branch(SmiLessThan(a, b), if_true, if_false); + } - void BranchIfSmiLessThanOrEqual(TNode a, TNode b, Label* if_true, - Label* if_false) { - Branch(SmiLessThanOrEqual(a, b), if_true, if_false); - } + void BranchIfSmiLessThanOrEqual(TNode a, TNode b, Label* if_true, + Label* if_false) { + Branch(SmiLessThanOrEqual(a, b), if_true, if_false); + } - void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) { - Branch(Float64Equal(value, value), if_false, if_true); - } + void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) { + Branch(Float64Equal(value, value), if_false, if_true); + } - // Branches to {if_true} if ToBoolean applied to {value} yields true, - // otherwise goes to {if_false}. - void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false); + // Branches to {if_true} if ToBoolean applied to {value} yields true, + // otherwise goes to {if_false}. + void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false); - // Branches to {if_false} if ToBoolean applied to {value} yields false, - // otherwise goes to {if_true}. - void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) { - BranchIfToBooleanIsTrue(value, if_true, if_false); - } + // Branches to {if_false} if ToBoolean applied to {value} yields false, + // otherwise goes to {if_true}. 
+ void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) { + BranchIfToBooleanIsTrue(value, if_true, if_false); + } - void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false); + void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false); - // Branches to {if_true} when --force-slow-path flag has been passed. - // It's used for testing to ensure that slow path implementation behave - // equivalent to corresponding fast paths (where applicable). - // - // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise. - void GotoIfForceSlowPath(Label* if_true); + // Branches to {if_true} when --force-slow-path flag has been passed. + // It's used for testing to ensure that slow path implementation behave + // equivalent to corresponding fast paths (where applicable). + // + // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise. + void GotoIfForceSlowPath(Label* if_true); - // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect. - void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true); + // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect. + void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true); - // Load value from current parent frame by given offset in bytes. - Node* LoadFromParentFrame(int offset, - MachineType type = MachineType::AnyTagged()); + // Load value from current parent frame by given offset in bytes. + Node* LoadFromParentFrame(int offset, + MachineType type = MachineType::AnyTagged()); - // Load an object pointer from a buffer that isn't in the heap. 
- Node* LoadBufferObject(Node* buffer, int offset, MachineType type); - TNode LoadBufferObject(TNode buffer, int offset) { - return CAST(LoadBufferObject(buffer, offset, MachineType::AnyTagged())); - } - TNode LoadBufferPointer(TNode buffer, int offset) { - return UncheckedCast( - LoadBufferObject(buffer, offset, MachineType::Pointer())); - } - TNode LoadBufferSmi(TNode buffer, int offset) { - return CAST(LoadBufferObject(buffer, offset, MachineType::TaggedSigned())); - } - // Load a field from an object on the heap. - Node* LoadObjectField(SloppyTNode object, int offset, - MachineType type); - template , TNode>::value, - int>::type = 0> - TNode LoadObjectField(TNode object, int offset) { - return CAST(LoadObjectField(object, offset, MachineTypeOf::value)); - } - template , TNode>::value, - int>::type = 0> - TNode LoadObjectField(TNode object, int offset) { - return UncheckedCast( - LoadObjectField(object, offset, MachineTypeOf::value)); - } - TNode LoadObjectField(SloppyTNode object, int offset) { - return UncheckedCast( - LoadObjectField(object, offset, MachineType::AnyTagged())); - } - Node* LoadObjectField(SloppyTNode object, - SloppyTNode offset, MachineType type); - TNode LoadObjectField(SloppyTNode object, - SloppyTNode offset) { - return UncheckedCast( - LoadObjectField(object, offset, MachineType::AnyTagged())); - } - template , TNode>::value, - int>::type = 0> - TNode LoadObjectField(TNode object, TNode offset) { - return UncheckedCast( - LoadObjectField(object, offset, MachineTypeOf::value)); - } - // Load a SMI field and untag it. - TNode LoadAndUntagObjectField(SloppyTNode object, - int offset); - // Load a SMI field, untag it, and convert to Word32. - TNode LoadAndUntagToWord32ObjectField(Node* object, int offset); - // Load a SMI and untag it. 
- TNode LoadAndUntagSmi(Node* base, int index); - - TNode LoadMaybeWeakObjectField(SloppyTNode object, - int offset) { - return UncheckedCast( - LoadObjectField(object, offset, MachineType::AnyTagged())); - } + // Load an object pointer from a buffer that isn't in the heap. + Node* LoadBufferObject(Node* buffer, int offset, MachineType type); + TNode LoadBufferObject(TNode buffer, int offset) { + return CAST(LoadBufferObject(buffer, offset, MachineType::AnyTagged())); + } + TNode LoadBufferPointer(TNode buffer, int offset) { + return UncheckedCast( + LoadBufferObject(buffer, offset, MachineType::Pointer())); + } + TNode LoadBufferSmi(TNode buffer, int offset) { + return CAST(LoadBufferObject(buffer, offset, MachineType::TaggedSigned())); + } + // Load a field from an object on the heap. + Node* LoadObjectField(SloppyTNode object, int offset, + MachineType type); + template , TNode>::value, + int>::type = 0> + TNode LoadObjectField(TNode object, int offset) { + return CAST(LoadObjectField(object, offset, MachineTypeOf::value)); + } + template , TNode>::value, + int>::type = 0> + TNode LoadObjectField(TNode object, int offset) { + return UncheckedCast( + LoadObjectField(object, offset, MachineTypeOf::value)); + } + TNode LoadObjectField(SloppyTNode object, int offset) { + return UncheckedCast( + LoadObjectField(object, offset, MachineType::AnyTagged())); + } + Node* LoadObjectField(SloppyTNode object, + SloppyTNode offset, MachineType type); + TNode LoadObjectField(SloppyTNode object, + SloppyTNode offset) { + return UncheckedCast( + LoadObjectField(object, offset, MachineType::AnyTagged())); + } + template , TNode>::value, + int>::type = 0> + TNode LoadObjectField(TNode object, TNode offset) { + return UncheckedCast( + LoadObjectField(object, offset, MachineTypeOf::value)); + } + // Load a SMI field and untag it. + TNode LoadAndUntagObjectField(SloppyTNode object, + int offset); + // Load a SMI field, untag it, and convert to Word32. 
+ TNode LoadAndUntagToWord32ObjectField(Node* object, int offset); + // Load a SMI and untag it. + TNode LoadAndUntagSmi(Node* base, int index); + + TNode LoadMaybeWeakObjectField(SloppyTNode object, + int offset) { + return UncheckedCast( + LoadObjectField(object, offset, MachineType::AnyTagged())); + } - TNode LoadConstructorOrBackPointer(TNode map) { - return LoadObjectField(map, Map::kConstructorOrBackPointerOffset); - } + TNode LoadConstructorOrBackPointer(TNode map) { + return LoadObjectField(map, Map::kConstructorOrBackPointerOffset); + } - // Reference is the CSA-equivalent of a Torque reference value, - // representing an inner pointer into a HeapObject. - struct Reference { - TNode object; - TNode offset; - - std::tuple, TNode> Flatten() const { - return std::make_tuple(object, offset); - } - }; - - template , TNode>::value, - int>::type = 0> - TNode LoadReference(Reference reference) { - TNode offset = - IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); - return CAST( - LoadFromObject(MachineTypeOf::value, reference.object, offset)); - } - template , TNode>::value, - int>::type = 0> - TNode LoadReference(Reference reference) { - TNode offset = - IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); - return UncheckedCast( - LoadFromObject(MachineTypeOf::value, reference.object, offset)); - } - template , TNode>::value, - int>::type = 0> - void StoreReference(Reference reference, TNode value) { - MachineRepresentation rep = MachineRepresentationOf::value; - StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull; - if (std::is_same::value) { - write_barrier = StoreToObjectWriteBarrier::kNone; - } else if (std::is_same::value) { - write_barrier = StoreToObjectWriteBarrier::kMap; - } - TNode offset = - IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); - StoreToObject(rep, reference.object, offset, value, write_barrier); - } - template , TNode>::value, - int>::type = 0> - void StoreReference(Reference 
reference, TNode value) { - TNode offset = - IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); - StoreToObject(MachineRepresentationOf::value, reference.object, offset, - value, StoreToObjectWriteBarrier::kNone); - } + // Reference is the CSA-equivalent of a Torque reference value, + // representing an inner pointer into a HeapObject. + struct Reference { + TNode object; + TNode offset; + + std::tuple, TNode> Flatten() const { + return std::make_tuple(object, offset); + } + }; + + template , TNode>::value, + int>::type = 0> + TNode LoadReference(Reference reference) { + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + return CAST( + LoadFromObject(MachineTypeOf::value, reference.object, offset)); + } + template , TNode>::value, + int>::type = 0> + TNode LoadReference(Reference reference) { + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + return UncheckedCast( + LoadFromObject(MachineTypeOf::value, reference.object, offset)); + } + template , TNode>::value, + int>::type = 0> + void StoreReference(Reference reference, TNode value) { + MachineRepresentation rep = MachineRepresentationOf::value; + StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull; + if (std::is_same::value) { + write_barrier = StoreToObjectWriteBarrier::kNone; + } else if (std::is_same::value) { + write_barrier = StoreToObjectWriteBarrier::kMap; + } + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + StoreToObject(rep, reference.object, offset, value, write_barrier); + } + template , TNode>::value, + int>::type = 0> + void StoreReference(Reference reference, TNode value) { + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + StoreToObject(MachineRepresentationOf::value, reference.object, offset, + value, StoreToObjectWriteBarrier::kNone); + } - // Tag a smi and store it. 
- void StoreAndTagSmi(Node* base, int offset, Node* value); - - // Load the floating point value of a HeapNumber. - TNode LoadHeapNumberValue(SloppyTNode object); - // Load the Map of an HeapObject. - TNode LoadMap(SloppyTNode object); - // Load the instance type of an HeapObject. - TNode LoadInstanceType(SloppyTNode object); - // Compare the instance the type of the object against the provided one. - TNode HasInstanceType(SloppyTNode object, - InstanceType type); - TNode DoesntHaveInstanceType(SloppyTNode object, - InstanceType type); - TNode TaggedDoesntHaveInstanceType(SloppyTNode any_tagged, - InstanceType type); - // Load the properties backing store of a JSObject. - TNode LoadSlowProperties(SloppyTNode object); - TNode LoadFastProperties(SloppyTNode object); - // Load the elements backing store of a JSObject. - TNode LoadElements(SloppyTNode object) { - return LoadJSObjectElements(object); - } - // Load the length of a JSArray instance. - TNode LoadJSArgumentsObjectWithLength( - SloppyTNode array); - // Load the length of a JSArray instance. - TNode LoadJSArrayLength(SloppyTNode array); - // Load the length of a fast JSArray instance. Returns a positive Smi. - TNode LoadFastJSArrayLength(SloppyTNode array); - // Load the length of a fixed array base instance. - TNode LoadFixedArrayBaseLength(SloppyTNode array); - // Load the length of a fixed array base instance. - TNode LoadAndUntagFixedArrayBaseLength( - SloppyTNode array); - // Load the length of a WeakFixedArray. - TNode LoadWeakFixedArrayLength(TNode array); - TNode LoadAndUntagWeakFixedArrayLength( - SloppyTNode array); - // Load the number of descriptors in DescriptorArray. - TNode LoadNumberOfDescriptors(TNode array); - // Load the bit field of a Map. - TNode LoadMapBitField(SloppyTNode map); - // Load bit field 2 of a map. - TNode LoadMapBitField2(SloppyTNode map); - // Load bit field 3 of a map. - TNode LoadMapBitField3(SloppyTNode map); - // Load the instance type of a map. 
- TNode LoadMapInstanceType(SloppyTNode map); - // Load the ElementsKind of a map. - TNode LoadMapElementsKind(SloppyTNode map); - TNode LoadElementsKind(SloppyTNode object); - // Load the instance descriptors of a map. - TNode LoadMapDescriptors(SloppyTNode map); - // Load the prototype of a map. - TNode LoadMapPrototype(SloppyTNode map); - // Load the prototype info of a map. The result has to be checked if it is a - // prototype info object or not. - TNode LoadMapPrototypeInfo(SloppyTNode map, - Label* if_has_no_proto_info); - // Load the instance size of a Map. - TNode LoadMapInstanceSizeInWords(SloppyTNode map); - // Load the inobject properties start of a Map (valid only for JSObjects). - TNode LoadMapInobjectPropertiesStartInWords(SloppyTNode map); - // Load the constructor function index of a Map (only for primitive maps). - TNode LoadMapConstructorFunctionIndex(SloppyTNode map); - // Load the constructor of a Map (equivalent to Map::GetConstructor()). - TNode LoadMapConstructor(SloppyTNode map); - // Load the EnumLength of a Map. - Node* LoadMapEnumLength(SloppyTNode map); - // Load the back-pointer of a Map. - TNode LoadMapBackPointer(SloppyTNode map); - // Checks that |map| has only simple properties, returns bitfield3. - TNode EnsureOnlyHasSimpleProperties(TNode map, - TNode instance_type, - Label* bailout); - // Load the identity hash of a JSRececiver. - TNode LoadJSReceiverIdentityHash(SloppyTNode receiver, - Label* if_no_hash = nullptr); - - // This is only used on a newly allocated PropertyArray which - // doesn't have an existing hash. - void InitializePropertyArrayLength(Node* property_array, Node* length, - ParameterMode mode); - - // Check if the map is set for slow properties. - TNode IsDictionaryMap(SloppyTNode map); - - // Load the hash field of a name as an uint32 value. - TNode LoadNameHashField(SloppyTNode name); - // Load the hash value of a name as an uint32 value. 
- // If {if_hash_not_computed} label is specified then it also checks if - // hash is actually computed. - TNode LoadNameHash(SloppyTNode name, - Label* if_hash_not_computed = nullptr); - - // Load length field of a String object as Smi value. - TNode LoadStringLengthAsSmi(SloppyTNode string); - // Load length field of a String object as intptr_t value. - TNode LoadStringLengthAsWord(SloppyTNode string); - // Load length field of a String object as uint32_t value. - TNode LoadStringLengthAsWord32(SloppyTNode string); - // Loads a pointer to the sequential String char array. - Node* PointerToSeqStringData(Node* seq_string); - // Load value field of a JSPrimitiveWrapper object. - Node* LoadJSPrimitiveWrapperValue(Node* object); - - // Figures out whether the value of maybe_object is: - // - a SMI (jump to "if_smi", "extracted" will be the SMI value) - // - a cleared weak reference (jump to "if_cleared", "extracted" will be - // untouched) - // - a weak reference (jump to "if_weak", "extracted" will be the object - // pointed to) - // - a strong reference (jump to "if_strong", "extracted" will be the object - // pointed to) - void DispatchMaybeObject(TNode maybe_object, Label* if_smi, - Label* if_cleared, Label* if_weak, Label* if_strong, - TVariable* extracted); - // See MaybeObject for semantics of these functions. - TNode IsStrong(TNode value); - // This variant is for overzealous checking. - TNode IsStrong(TNode value) { - return IsStrong(ReinterpretCast(value)); - } - TNode GetHeapObjectIfStrong(TNode value, - Label* if_not_strong); + // Tag a smi and store it. + void StoreAndTagSmi(Node* base, int offset, Node* value); + + // Load the floating point value of a HeapNumber. + TNode LoadHeapNumberValue(SloppyTNode object); + // Load the Map of an HeapObject. + TNode LoadMap(SloppyTNode object); + // Load the instance type of an HeapObject. + TNode LoadInstanceType(SloppyTNode object); + // Compare the instance the type of the object against the provided one. 
+ TNode HasInstanceType(SloppyTNode object, + InstanceType type); + TNode DoesntHaveInstanceType(SloppyTNode object, + InstanceType type); + TNode TaggedDoesntHaveInstanceType(SloppyTNode any_tagged, + InstanceType type); + // Load the properties backing store of a JSObject. + TNode LoadSlowProperties(SloppyTNode object); + TNode LoadFastProperties(SloppyTNode object); + // Load the elements backing store of a JSObject. + TNode LoadElements(SloppyTNode object) { + return LoadJSObjectElements(object); + } + // Load the length of a JSArray instance. + TNode LoadJSArgumentsObjectWithLength( + SloppyTNode array); + // Load the length of a JSArray instance. + TNode LoadJSArrayLength(SloppyTNode array); + // Load the length of a fast JSArray instance. Returns a positive Smi. + TNode LoadFastJSArrayLength(SloppyTNode array); + // Load the length of a fixed array base instance. + TNode LoadFixedArrayBaseLength(SloppyTNode array); + // Load the length of a fixed array base instance. + TNode LoadAndUntagFixedArrayBaseLength( + SloppyTNode array); + // Load the length of a WeakFixedArray. + TNode LoadWeakFixedArrayLength(TNode array); + TNode LoadAndUntagWeakFixedArrayLength( + SloppyTNode array); + // Load the number of descriptors in DescriptorArray. + TNode LoadNumberOfDescriptors(TNode array); + // Load the bit field of a Map. + TNode LoadMapBitField(SloppyTNode map); + // Load bit field 2 of a map. + TNode LoadMapBitField2(SloppyTNode map); + // Load bit field 3 of a map. + TNode LoadMapBitField3(SloppyTNode map); + // Load the instance type of a map. + TNode LoadMapInstanceType(SloppyTNode map); + // Load the ElementsKind of a map. + TNode LoadMapElementsKind(SloppyTNode map); + TNode LoadElementsKind(SloppyTNode object); + // Load the instance descriptors of a map. + TNode LoadMapDescriptors(SloppyTNode map); + // Load the prototype of a map. + TNode LoadMapPrototype(SloppyTNode map); + // Load the prototype info of a map. 
The result has to be checked if it is a + // prototype info object or not. + TNode LoadMapPrototypeInfo(SloppyTNode map, + Label* if_has_no_proto_info); + // Load the instance size of a Map. + TNode LoadMapInstanceSizeInWords(SloppyTNode map); + // Load the inobject properties start of a Map (valid only for JSObjects). + TNode LoadMapInobjectPropertiesStartInWords(SloppyTNode map); + // Load the constructor function index of a Map (only for primitive maps). + TNode LoadMapConstructorFunctionIndex(SloppyTNode map); + // Load the constructor of a Map (equivalent to Map::GetConstructor()). + TNode LoadMapConstructor(SloppyTNode map); + // Load the EnumLength of a Map. + Node* LoadMapEnumLength(SloppyTNode map); + // Load the back-pointer of a Map. + TNode LoadMapBackPointer(SloppyTNode map); + // Checks that |map| has only simple properties, returns bitfield3. + TNode EnsureOnlyHasSimpleProperties(TNode map, + TNode instance_type, + Label* bailout); + // Load the identity hash of a JSRececiver. + TNode LoadJSReceiverIdentityHash(SloppyTNode receiver, + Label* if_no_hash = nullptr); + + // This is only used on a newly allocated PropertyArray which + // doesn't have an existing hash. + void InitializePropertyArrayLength(Node* property_array, Node* length, + ParameterMode mode); + + // Check if the map is set for slow properties. + TNode IsDictionaryMap(SloppyTNode map); + + // Load the hash field of a name as an uint32 value. + TNode LoadNameHashField(SloppyTNode name); + // Load the hash value of a name as an uint32 value. + // If {if_hash_not_computed} label is specified then it also checks if + // hash is actually computed. + TNode LoadNameHash(SloppyTNode name, + Label* if_hash_not_computed = nullptr); + + // Load length field of a String object as Smi value. + TNode LoadStringLengthAsSmi(SloppyTNode string); + // Load length field of a String object as intptr_t value. 
+ TNode LoadStringLengthAsWord(SloppyTNode string); + // Load length field of a String object as uint32_t value. + TNode LoadStringLengthAsWord32(SloppyTNode string); + // Loads a pointer to the sequential String char array. + Node* PointerToSeqStringData(Node* seq_string); + // Load value field of a JSPrimitiveWrapper object. + Node* LoadJSPrimitiveWrapperValue(Node* object); + + // Figures out whether the value of maybe_object is: + // - a SMI (jump to "if_smi", "extracted" will be the SMI value) + // - a cleared weak reference (jump to "if_cleared", "extracted" will be + // untouched) + // - a weak reference (jump to "if_weak", "extracted" will be the object + // pointed to) + // - a strong reference (jump to "if_strong", "extracted" will be the object + // pointed to) + void DispatchMaybeObject(TNode maybe_object, Label* if_smi, + Label* if_cleared, Label* if_weak, Label* if_strong, + TVariable* extracted); + // See MaybeObject for semantics of these functions. + TNode IsStrong(TNode value); + // This variant is for overzealous checking. + TNode IsStrong(TNode value) { + return IsStrong(ReinterpretCast(value)); + } + TNode GetHeapObjectIfStrong(TNode value, + Label* if_not_strong); - TNode IsWeakOrCleared(TNode value); - TNode IsCleared(TNode value); - TNode IsNotCleared(TNode value); + TNode IsWeakOrCleared(TNode value); + TNode IsCleared(TNode value); + TNode IsNotCleared(TNode value); - // Removes the weak bit + asserts it was set. - TNode GetHeapObjectAssumeWeak(TNode value); + // Removes the weak bit + asserts it was set. 
+ TNode GetHeapObjectAssumeWeak(TNode value); - TNode GetHeapObjectAssumeWeak(TNode value, - Label* if_cleared); + TNode GetHeapObjectAssumeWeak(TNode value, + Label* if_cleared); - TNode IsWeakReferenceTo(TNode object, - TNode value); - TNode IsNotWeakReferenceTo(TNode object, - TNode value); - TNode IsStrongReferenceTo(TNode object, + TNode IsWeakReferenceTo(TNode object, TNode value); + TNode IsNotWeakReferenceTo(TNode object, + TNode value); + TNode IsStrongReferenceTo(TNode object, + TNode value); + + TNode MakeWeak(TNode value); + + void FixedArrayBoundsCheck(TNode array, Node* index, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + // Array is any array-like type that has a fixed header followed by + // tagged elements. + template + TNode LoadArrayLength(TNode array); + + // Array is any array-like type that has a fixed header followed by + // tagged elements. + template + TNode LoadArrayElement( + TNode array, int array_header_size, Node* index, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + + TNode LoadFixedArrayElement( + TNode object, Node* index, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe, + CheckBounds check_bounds = CheckBounds::kAlways); + + // This doesn't emit a bounds-check. As part of the security-performance + // tradeoff, only use it if it is performance critical. 
+ TNode UnsafeLoadFixedArrayElement( + TNode object, Node* index, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return LoadFixedArrayElement(object, index, additional_offset, + parameter_mode, needs_poisoning, + CheckBounds::kDebugOnly); + } - TNode MakeWeak(TNode value); - - void FixedArrayBoundsCheck(TNode array, Node* index, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - // Array is any array-like type that has a fixed header followed by - // tagged elements. - template - TNode LoadArrayLength(TNode array); - - // Array is any array-like type that has a fixed header followed by - // tagged elements. - template - TNode LoadArrayElement( - TNode array, int array_header_size, Node* index, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); - - TNode LoadFixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe, - CheckBounds check_bounds = CheckBounds::kAlways); - - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. - TNode UnsafeLoadFixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadFixedArrayElement(object, index, additional_offset, - parameter_mode, needs_poisoning, - CheckBounds::kDebugOnly); - } + TNode LoadFixedArrayElement( + TNode object, TNode index, + LoadSensitivity needs_poisoning, + CheckBounds check_bounds = CheckBounds::kAlways) { + return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS, + needs_poisoning, check_bounds); + } + // This doesn't emit a bounds-check. 
As part of the security-performance + // tradeoff, only use it if it is performance critical. + TNode UnsafeLoadFixedArrayElement(TNode object, + TNode index, + LoadSensitivity needs_poisoning) { + return LoadFixedArrayElement(object, index, needs_poisoning, + CheckBounds::kDebugOnly); + } - TNode LoadFixedArrayElement( - TNode object, TNode index, - LoadSensitivity needs_poisoning, - CheckBounds check_bounds = CheckBounds::kAlways) { - return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS, - needs_poisoning, check_bounds); - } - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. - TNode UnsafeLoadFixedArrayElement(TNode object, - TNode index, - LoadSensitivity needs_poisoning) { - return LoadFixedArrayElement(object, index, needs_poisoning, - CheckBounds::kDebugOnly); - } + TNode LoadFixedArrayElement( + TNode object, TNode index, int additional_offset = 0, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return LoadFixedArrayElement(object, index, additional_offset, + INTPTR_PARAMETERS, needs_poisoning); + } - TNode LoadFixedArrayElement( - TNode object, TNode index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadFixedArrayElement(object, index, additional_offset, - INTPTR_PARAMETERS, needs_poisoning); - } + TNode LoadFixedArrayElement( + TNode object, int index, int additional_offset = 0, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return LoadFixedArrayElement(object, IntPtrConstant(index), + additional_offset, INTPTR_PARAMETERS, + needs_poisoning); + } + // This doesn't emit a bounds-check. As part of the security-performance + // tradeoff, only use it if it is performance critical. 
+ TNode UnsafeLoadFixedArrayElement( + TNode object, int index, int additional_offset = 0, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return LoadFixedArrayElement(object, IntPtrConstant(index), + additional_offset, INTPTR_PARAMETERS, + needs_poisoning, CheckBounds::kDebugOnly); + } + TNode LoadFixedArrayElement(TNode object, + TNode index) { + return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS); + } - TNode LoadFixedArrayElement( - TNode object, int index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadFixedArrayElement(object, IntPtrConstant(index), - additional_offset, INTPTR_PARAMETERS, - needs_poisoning); - } - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. - TNode UnsafeLoadFixedArrayElement( - TNode object, int index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadFixedArrayElement(object, IntPtrConstant(index), - additional_offset, INTPTR_PARAMETERS, - needs_poisoning, CheckBounds::kDebugOnly); - } - TNode LoadFixedArrayElement(TNode object, - TNode index) { - return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS); - } + TNode LoadPropertyArrayElement(TNode object, + SloppyTNode index); + TNode LoadPropertyArrayLength(TNode object); + + // Load an element from an array and untag it and return it as Word32. + // Array is any array-like type that has a fixed header followed by + // tagged elements. + template + TNode LoadAndUntagToWord32ArrayElement( + TNode array, int array_header_size, Node* index, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + // Load an array element from a FixedArray, untag it and return it as Word32. 
+ TNode LoadAndUntagToWord32FixedArrayElement( + TNode object, Node* index, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + TNode LoadAndUntagToWord32FixedArrayElement( + TNode object, int index, int additional_offset = 0) { + return LoadAndUntagToWord32FixedArrayElement( + object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS); + } - TNode LoadPropertyArrayElement(TNode object, - SloppyTNode index); - TNode LoadPropertyArrayLength(TNode object); - - // Load an element from an array and untag it and return it as Word32. - // Array is any array-like type that has a fixed header followed by - // tagged elements. - template - TNode LoadAndUntagToWord32ArrayElement( - TNode array, int array_header_size, Node* index, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - // Load an array element from a FixedArray, untag it and return it as Word32. - TNode LoadAndUntagToWord32FixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - TNode LoadAndUntagToWord32FixedArrayElement( - TNode object, int index, int additional_offset = 0) { - return LoadAndUntagToWord32FixedArrayElement( - object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS); - } + // Load an array element from a WeakFixedArray. + TNode LoadWeakFixedArrayElement( + TNode object, Node* index, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + + TNode LoadWeakFixedArrayElement( + TNode object, int index, int additional_offset = 0, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return LoadWeakFixedArrayElement(object, IntPtrConstant(index), + additional_offset, INTPTR_PARAMETERS, + needs_poisoning); + } - // Load an array element from a WeakFixedArray. 
- TNode LoadWeakFixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + // Load an array element from a FixedDoubleArray. + TNode LoadFixedDoubleArrayElement( + SloppyTNode object, Node* index, + MachineType machine_type, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + Label* if_hole = nullptr); + + Node* LoadFixedDoubleArrayElement(TNode object, + TNode index, + Label* if_hole = nullptr) { + return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0, + SMI_PARAMETERS, if_hole); + } - TNode LoadWeakFixedArrayElement( - TNode object, int index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadWeakFixedArrayElement(object, IntPtrConstant(index), - additional_offset, INTPTR_PARAMETERS, - needs_poisoning); - } + Node* LoadFixedDoubleArrayElement(TNode object, + TNode index, + Label* if_hole = nullptr) { + return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0, + INTPTR_PARAMETERS, if_hole); + } - // Load an array element from a FixedDoubleArray. - TNode LoadFixedDoubleArrayElement( - SloppyTNode object, Node* index, - MachineType machine_type, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - Label* if_hole = nullptr); - - Node* LoadFixedDoubleArrayElement(TNode object, - TNode index, - Label* if_hole = nullptr) { - return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0, - SMI_PARAMETERS, if_hole); - } + // Load an array element from a FixedArray, FixedDoubleArray or a + // NumberDictionary (depending on the |elements_kind|) and return + // it as a tagged value. Assumes that the |index| passed a length + // check before. 
Bails out to |if_accessor| if the element that + // was found is an accessor, or to |if_hole| if the element at + // the given |index| is not found in |elements|. + TNode LoadFixedArrayBaseElementAsTagged( + TNode elements, TNode index, + TNode elements_kind, Label* if_accessor, Label* if_hole); + + // Load a feedback slot from a FeedbackVector. + TNode LoadFeedbackVectorSlot( + Node* object, Node* index, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + TNode LoadFeedbackVectorLength(TNode); + TNode LoadDoubleWithHoleCheck(TNode array, + TNode index, + Label* if_hole = nullptr); + TNode LoadDoubleWithHoleCheck(TNode array, + TNode index, + Label* if_hole = nullptr); + + // Load Float64 value by |base| + |offset| address. If the value is a double + // hole then jump to |if_hole|. If |machine_type| is None then only the hole + // check is generated. + TNode LoadDoubleWithHoleCheck( + SloppyTNode base, SloppyTNode offset, Label* if_hole, + MachineType machine_type = MachineType::Float64()); + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, Node* index_node, ElementsKind elements_kind, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, TNode index_node, + ElementsKind elements_kind) { + return LoadFixedTypedArrayElementAsTagged(data_pointer, index_node, + elements_kind, SMI_PARAMETERS); + } + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, TNode index, + TNode elements_kind); + // Parts of the above, factored out for readability: + TNode LoadFixedBigInt64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset); + TNode LoadFixedBigUint64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset); + // 64-bit platforms only: + TNode BigIntFromInt64(TNode value); + TNode BigIntFromUint64(TNode value); + // 32-bit platforms only: + TNode BigIntFromInt32Pair(TNode low, TNode high); + TNode BigIntFromUint32Pair(TNode 
low, TNode high); + + void StoreJSTypedArrayElementFromTagged(TNode context, + TNode typed_array, + TNode index_node, + TNode value, + ElementsKind elements_kind); + + // Context manipulation + TNode LoadContextElement(SloppyTNode context, + int slot_index); + TNode LoadContextElement(SloppyTNode context, + SloppyTNode slot_index); + TNode LoadContextElement(TNode context, + TNode slot_index); + void StoreContextElement(SloppyTNode context, int slot_index, + SloppyTNode value); + void StoreContextElement(SloppyTNode context, + SloppyTNode slot_index, + SloppyTNode value); + void StoreContextElementNoWriteBarrier(SloppyTNode context, + int slot_index, + SloppyTNode value); + TNode LoadNativeContext(SloppyTNode context); + // Calling this is only valid if there's a module context in the chain. + TNode LoadModuleContext(SloppyTNode context); + + void GotoIfContextElementEqual(Node* value, Node* native_context, + int slot_index, Label* if_equal) { + GotoIf(WordEqual(value, LoadContextElement(native_context, slot_index)), + if_equal); + } - Node* LoadFixedDoubleArrayElement(TNode object, - TNode index, - Label* if_hole = nullptr) { - return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0, - INTPTR_PARAMETERS, if_hole); - } + TNode LoadJSArrayElementsMap(ElementsKind kind, + SloppyTNode native_context); + TNode LoadJSArrayElementsMap(SloppyTNode kind, + SloppyTNode native_context); + + TNode HasPrototypeSlot(TNode function); + TNode IsGeneratorFunction(TNode function); + TNode HasPrototypeProperty(TNode function, TNode map); + void GotoIfPrototypeRequiresRuntimeLookup(TNode function, + TNode map, Label* runtime); + // Load the "prototype" property of a JSFunction. + Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout); + + TNode LoadSharedFunctionInfoBytecodeArray( + SloppyTNode shared); + + void StoreObjectByteNoWriteBarrier(TNode object, int offset, + TNode value); + + // Store the floating point value of a HeapNumber. 
+ void StoreHeapNumberValue(SloppyTNode object, + SloppyTNode value); + void StoreMutableHeapNumberValue(SloppyTNode object, + SloppyTNode value); + // Store a field to an object on the heap. + void StoreObjectField(Node* object, int offset, Node* value); + void StoreObjectField(Node* object, Node* offset, Node* value); + void StoreObjectFieldNoWriteBarrier( + Node* object, int offset, Node* value, + MachineRepresentation rep = MachineRepresentation::kTagged); + void UnsafeStoreObjectFieldNoWriteBarrier(TNode object, + int offset, TNode value); + void StoreObjectFieldNoWriteBarrier( + Node* object, SloppyTNode offset, Node* value, + MachineRepresentation rep = MachineRepresentation::kTagged); + + template + void StoreObjectFieldNoWriteBarrier(Node* object, SloppyTNode offset, + TNode value) { + StoreObjectFieldNoWriteBarrier(object, offset, value, + MachineRepresentationOf::value); + } + template + void StoreObjectFieldNoWriteBarrier(Node* object, int offset, + TNode value) { + StoreObjectFieldNoWriteBarrier(object, offset, value, + MachineRepresentationOf::value); + } - // Load an array element from a FixedArray, FixedDoubleArray or a - // NumberDictionary (depending on the |elements_kind|) and return - // it as a tagged value. Assumes that the |index| passed a length - // check before. Bails out to |if_accessor| if the element that - // was found is an accessor, or to |if_hole| if the element at - // the given |index| is not found in |elements|. - TNode LoadFixedArrayBaseElementAsTagged( - TNode elements, TNode index, - TNode elements_kind, Label* if_accessor, Label* if_hole); - - // Load a feedback slot from a FeedbackVector. 
- TNode LoadFeedbackVectorSlot( - Node* object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - TNode LoadFeedbackVectorLength(TNode); - TNode LoadDoubleWithHoleCheck(TNode array, - TNode index, - Label* if_hole = nullptr); - TNode LoadDoubleWithHoleCheck(TNode array, - TNode index, - Label* if_hole = nullptr); - - // Load Float64 value by |base| + |offset| address. If the value is a double - // hole then jump to |if_hole|. If |machine_type| is None then only the hole - // check is generated. - TNode LoadDoubleWithHoleCheck( - SloppyTNode base, SloppyTNode offset, Label* if_hole, - MachineType machine_type = MachineType::Float64()); - TNode LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, Node* index_node, ElementsKind elements_kind, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - TNode LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, TNode index_node, - ElementsKind elements_kind) { - return LoadFixedTypedArrayElementAsTagged(data_pointer, index_node, - elements_kind, SMI_PARAMETERS); - } - TNode LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, TNode index, - TNode elements_kind); - // Parts of the above, factored out for readability: - TNode LoadFixedBigInt64ArrayElementAsTagged( - SloppyTNode data_pointer, SloppyTNode offset); - TNode LoadFixedBigUint64ArrayElementAsTagged( - SloppyTNode data_pointer, SloppyTNode offset); - // 64-bit platforms only: - TNode BigIntFromInt64(TNode value); - TNode BigIntFromUint64(TNode value); - // 32-bit platforms only: - TNode BigIntFromInt32Pair(TNode low, TNode high); - TNode BigIntFromUint32Pair(TNode low, TNode high); - - void StoreJSTypedArrayElementFromTagged(TNode context, - TNode typed_array, - TNode index_node, - TNode value, - ElementsKind elements_kind); - - // Context manipulation - TNode LoadContextElement(SloppyTNode context, - int slot_index); - TNode LoadContextElement(SloppyTNode context, - SloppyTNode slot_index); - TNode 
LoadContextElement(TNode context, - TNode slot_index); - void StoreContextElement(SloppyTNode context, int slot_index, - SloppyTNode value); - void StoreContextElement(SloppyTNode context, - SloppyTNode slot_index, - SloppyTNode value); - void StoreContextElementNoWriteBarrier(SloppyTNode context, - int slot_index, - SloppyTNode value); - TNode LoadNativeContext(SloppyTNode context); - // Calling this is only valid if there's a module context in the chain. - TNode LoadModuleContext(SloppyTNode context); - - void GotoIfContextElementEqual(Node* value, Node* native_context, - int slot_index, Label* if_equal) { - GotoIf(WordEqual(value, LoadContextElement(native_context, slot_index)), - if_equal); - } + // Store the Map of an HeapObject. + void StoreMap(Node* object, Node* map); + void StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index); + void StoreMapNoWriteBarrier(Node* object, Node* map); + void StoreObjectFieldRoot(Node* object, int offset, RootIndex root); + // Store an array element to a FixedArray. + void StoreFixedArrayElement( + TNode object, int index, SloppyTNode value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + CheckBounds check_bounds = CheckBounds::kAlways) { + return StoreFixedArrayElement(object, IntPtrConstant(index), value, + barrier_mode, 0, INTPTR_PARAMETERS, + check_bounds); + } + // This doesn't emit a bounds-check. As part of the security-performance + // tradeoff, only use it if it is performance critical. 
+ void UnsafeStoreFixedArrayElement( + TNode object, int index, SloppyTNode value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { + return StoreFixedArrayElement(object, index, value, barrier_mode, + CheckBounds::kDebugOnly); + } + void UnsafeStoreFixedArrayElement( + TNode object, int index, TNode value, + WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER) { + DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); + return StoreFixedArrayElement(object, index, value, + UNSAFE_SKIP_WRITE_BARRIER, + CheckBounds::kDebugOnly); + } + void StoreFixedArrayElement(TNode object, int index, + TNode value, + CheckBounds check_bounds = CheckBounds::kAlways) { + return StoreFixedArrayElement(object, IntPtrConstant(index), value, + UNSAFE_SKIP_WRITE_BARRIER, 0, + INTPTR_PARAMETERS, check_bounds); + } + // This doesn't emit a bounds-check. As part of the security-performance + // tradeoff, only use it if it is performance critical. + void UnsafeStoreFixedArrayElement(TNode object, int index, + TNode value) { + return StoreFixedArrayElement(object, index, value, + CheckBounds::kDebugOnly); + } - TNode LoadJSArrayElementsMap(ElementsKind kind, - SloppyTNode native_context); - TNode LoadJSArrayElementsMap(SloppyTNode kind, - SloppyTNode native_context); - - TNode HasPrototypeSlot(TNode function); - TNode IsGeneratorFunction(TNode function); - TNode HasPrototypeProperty(TNode function, TNode map); - void GotoIfPrototypeRequiresRuntimeLookup(TNode function, - TNode map, Label* runtime); - // Load the "prototype" property of a JSFunction. - Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout); - - TNode LoadSharedFunctionInfoBytecodeArray( - SloppyTNode shared); - - void StoreObjectByteNoWriteBarrier(TNode object, int offset, - TNode value); - - // Store the floating point value of a HeapNumber. 
- void StoreHeapNumberValue(SloppyTNode object, - SloppyTNode value); - void StoreMutableHeapNumberValue(SloppyTNode object, - SloppyTNode value); - // Store a field to an object on the heap. - void StoreObjectField(Node* object, int offset, Node* value); - void StoreObjectField(Node* object, Node* offset, Node* value); - void StoreObjectFieldNoWriteBarrier( - Node* object, int offset, Node* value, - MachineRepresentation rep = MachineRepresentation::kTagged); - void UnsafeStoreObjectFieldNoWriteBarrier(TNode object, - int offset, TNode value); - void StoreObjectFieldNoWriteBarrier( - Node* object, SloppyTNode offset, Node* value, - MachineRepresentation rep = MachineRepresentation::kTagged); - - template - void StoreObjectFieldNoWriteBarrier(Node* object, SloppyTNode offset, - TNode value) { - StoreObjectFieldNoWriteBarrier(object, offset, value, - MachineRepresentationOf::value); - } - template - void StoreObjectFieldNoWriteBarrier(Node* object, int offset, - TNode value) { - StoreObjectFieldNoWriteBarrier(object, offset, value, - MachineRepresentationOf::value); - } + void StoreJSArrayLength(TNode array, TNode length); + void StoreElements(TNode object, TNode elements); + + void StoreFixedArrayOrPropertyArrayElement( + Node* array, Node* index, Node* value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + void StoreFixedArrayElement( + TNode array, Node* index, SloppyTNode value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + CheckBounds check_bounds = CheckBounds::kAlways) { + if (NeedsBoundsCheck(check_bounds)) { + FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode); + } + StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode, + additional_offset, parameter_mode); + } - // Store the Map of an HeapObject. 
- void StoreMap(Node* object, Node* map); - void StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index); - void StoreMapNoWriteBarrier(Node* object, Node* map); - void StoreObjectFieldRoot(Node* object, int offset, RootIndex root); - // Store an array element to a FixedArray. - void StoreFixedArrayElement( - TNode object, int index, SloppyTNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - CheckBounds check_bounds = CheckBounds::kAlways) { - return StoreFixedArrayElement(object, IntPtrConstant(index), value, - barrier_mode, 0, INTPTR_PARAMETERS, - check_bounds); - } - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. - void UnsafeStoreFixedArrayElement( - TNode object, int index, SloppyTNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { - return StoreFixedArrayElement(object, index, value, barrier_mode, - CheckBounds::kDebugOnly); - } - void UnsafeStoreFixedArrayElement( - TNode object, int index, TNode value, - WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER) { - DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); - return StoreFixedArrayElement(object, index, value, - UNSAFE_SKIP_WRITE_BARRIER, - CheckBounds::kDebugOnly); - } - void StoreFixedArrayElement(TNode object, int index, - TNode value, - CheckBounds check_bounds = CheckBounds::kAlways) { - return StoreFixedArrayElement(object, IntPtrConstant(index), value, - UNSAFE_SKIP_WRITE_BARRIER, 0, - INTPTR_PARAMETERS, check_bounds); - } - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. - void UnsafeStoreFixedArrayElement(TNode object, int index, - TNode value) { - return StoreFixedArrayElement(object, index, value, - CheckBounds::kDebugOnly); - } + // This doesn't emit a bounds-check. As part of the security-performance + // tradeoff, only use it if it is performance critical. 
+ void UnsafeStoreFixedArrayElement( + TNode array, Node* index, SloppyTNode value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS) { + return StoreFixedArrayElement(array, index, value, barrier_mode, + additional_offset, parameter_mode, + CheckBounds::kDebugOnly); + } + void UnsafeStoreFixedArrayElement( + TNode array, Node* index, TNode value, + WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS) { + DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); + return StoreFixedArrayElement(array, index, value, + UNSAFE_SKIP_WRITE_BARRIER, additional_offset, + parameter_mode, CheckBounds::kDebugOnly); + } - void StoreJSArrayLength(TNode array, TNode length); - void StoreElements(TNode object, TNode elements); - - void StoreFixedArrayOrPropertyArrayElement( - Node* array, Node* index, Node* value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - void StoreFixedArrayElement( - TNode array, Node* index, SloppyTNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - CheckBounds check_bounds = CheckBounds::kAlways) { - if (NeedsBoundsCheck(check_bounds)) { - FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode); - } - StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode, - additional_offset, parameter_mode); - } + void StorePropertyArrayElement( + TNode array, Node* index, SloppyTNode value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS) { + StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode, + additional_offset, parameter_mode); + } - // This doesn't emit a bounds-check. 
As part of the security-performance - // tradeoff, only use it if it is performance critical. - void UnsafeStoreFixedArrayElement( - TNode array, Node* index, SloppyTNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS) { - return StoreFixedArrayElement(array, index, value, barrier_mode, - additional_offset, parameter_mode, - CheckBounds::kDebugOnly); - } - void UnsafeStoreFixedArrayElement( - TNode array, Node* index, TNode value, - WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS) { - DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); - return StoreFixedArrayElement(array, index, value, - UNSAFE_SKIP_WRITE_BARRIER, additional_offset, - parameter_mode, CheckBounds::kDebugOnly); - } + void StoreFixedArrayElement( + TNode array, TNode index, TNode value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { + StoreFixedArrayElement(array, index, value, barrier_mode, 0, + SMI_PARAMETERS); + } + void StoreFixedArrayElement( + TNode array, TNode index, TNode value, + WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER, + int additional_offset = 0) { + DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); + StoreFixedArrayElement(array, index, TNode {value}, + UNSAFE_SKIP_WRITE_BARRIER, additional_offset); + } + void StoreFixedArrayElement( + TNode array, TNode index, TNode value, + WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER, + int additional_offset = 0) { + DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); + StoreFixedArrayElement(array, index, TNode {value}, + UNSAFE_SKIP_WRITE_BARRIER, additional_offset, + SMI_PARAMETERS); + } - void StorePropertyArrayElement( - TNode array, Node* index, SloppyTNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS) { - StoreFixedArrayOrPropertyArrayElement(array, index, 
value, barrier_mode, - additional_offset, parameter_mode); - } + void StoreFixedDoubleArrayElement( + TNode object, Node* index, TNode value, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + CheckBounds check_bounds = CheckBounds::kAlways); + // This doesn't emit a bounds-check. As part of the security-performance + // tradeoff, only use it if it is performance critical. + void UnsafeStoreFixedDoubleArrayElement( + TNode object, Node* index, TNode value, + ParameterMode parameter_mode = INTPTR_PARAMETERS) { + return StoreFixedDoubleArrayElement(object, index, value, parameter_mode, + CheckBounds::kDebugOnly); + } - void StoreFixedArrayElement( - TNode array, TNode index, TNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { - StoreFixedArrayElement(array, index, value, barrier_mode, 0, - SMI_PARAMETERS); - } - void StoreFixedArrayElement( - TNode array, TNode index, TNode value, - WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER, - int additional_offset = 0) { - DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); - StoreFixedArrayElement(array, index, TNode{value}, - UNSAFE_SKIP_WRITE_BARRIER, additional_offset); - } - void StoreFixedArrayElement( - TNode array, TNode index, TNode value, - WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER, - int additional_offset = 0) { - DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode); - StoreFixedArrayElement(array, index, TNode{value}, - UNSAFE_SKIP_WRITE_BARRIER, additional_offset, - SMI_PARAMETERS); - } + void StoreFixedDoubleArrayElementSmi(TNode object, + TNode index, + TNode value) { + StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS); + } - void StoreFixedDoubleArrayElement( - TNode object, Node* index, TNode value, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - CheckBounds check_bounds = CheckBounds::kAlways); - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. 
- void UnsafeStoreFixedDoubleArrayElement( - TNode object, Node* index, TNode value, - ParameterMode parameter_mode = INTPTR_PARAMETERS) { - return StoreFixedDoubleArrayElement(object, index, value, parameter_mode, - CheckBounds::kDebugOnly); - } + void StoreFixedDoubleArrayHole(TNode array, Node* index, + ParameterMode mode = INTPTR_PARAMETERS); + void StoreFixedDoubleArrayHoleSmi(TNode array, + TNode index) { + StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS); + } - void StoreFixedDoubleArrayElementSmi(TNode object, - TNode index, - TNode value) { - StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS); - } + void StoreFeedbackVectorSlot( + Node* object, Node* index, Node* value, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + void EnsureArrayLengthWritable(TNode map, Label* bailout); + + // EnsureArrayPushable verifies that receiver with this map is: + // 1. Is not a prototype. + // 2. Is not a dictionary. + // 3. Has a writeable length property. + // It returns ElementsKind as a node for further division into cases. + TNode EnsureArrayPushable(TNode map, Label* bailout); + + void TryStoreArrayElement(ElementsKind kind, ParameterMode mode, + Label* bailout, Node* elements, Node* index, + Node* value); + // Consumes args into the array, and returns tagged new length. + TNode BuildAppendJSArray(ElementsKind kind, SloppyTNode array, + CodeStubArguments* args, + TVariable* arg_index, Label* bailout); + // Pushes value onto the end of array. 
+ void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value, + Label* bailout); + + void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address, + Node* value); + + Node* AllocateCellWithValue(Node* value, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + Node* AllocateSmiCell(int value = 0) { + return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER); + } - void StoreFixedDoubleArrayHole(TNode array, Node* index, - ParameterMode mode = INTPTR_PARAMETERS); - void StoreFixedDoubleArrayHoleSmi(TNode array, - TNode index) { - StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS); - } + Node* LoadCellValue(Node* cell); - void StoreFeedbackVectorSlot( - Node* object, Node* index, Node* value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - void EnsureArrayLengthWritable(TNode map, Label* bailout); - - // EnsureArrayPushable verifies that receiver with this map is: - // 1. Is not a prototype. - // 2. Is not a dictionary. - // 3. Has a writeable length property. - // It returns ElementsKind as a node for further division into cases. - TNode EnsureArrayPushable(TNode map, Label* bailout); - - void TryStoreArrayElement(ElementsKind kind, ParameterMode mode, - Label* bailout, Node* elements, Node* index, - Node* value); - // Consumes args into the array, and returns tagged new length. - TNode BuildAppendJSArray(ElementsKind kind, SloppyTNode array, - CodeStubArguments* args, - TVariable* arg_index, Label* bailout); - // Pushes value onto the end of array. 
- void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value, - Label* bailout); - - void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address, - Node* value); - - Node* AllocateCellWithValue(Node* value, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER); - Node* AllocateSmiCell(int value = 0) { - return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER); - } + void StoreCellValue(Node* cell, Node* value, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + + // Allocate a HeapNumber without initializing its value. + TNode AllocateHeapNumber(); + // Allocate a HeapNumber with a specific value. + TNode AllocateHeapNumberWithValue(SloppyTNode value); + TNode AllocateHeapNumberWithValue(double value) { + return AllocateHeapNumberWithValue(Float64Constant(value)); + } - Node* LoadCellValue(Node* cell); + // Allocate a MutableHeapNumber with a specific value. + TNode AllocateMutableHeapNumberWithValue( + SloppyTNode value); + + // Allocate a BigInt with {length} digits. Sets the sign bit to {false}. + // Does not initialize the digits. + TNode AllocateBigInt(TNode length); + // Like above, but allowing custom bitfield initialization. + TNode AllocateRawBigInt(TNode length); + void StoreBigIntBitfield(TNode bigint, TNode bitfield); + void StoreBigIntDigit(TNode bigint, intptr_t digit_index, + TNode digit); + void StoreBigIntDigit(TNode bigint, TNode digit_index, + TNode digit); + + TNode LoadBigIntBitfield(TNode bigint); + TNode LoadBigIntDigit(TNode bigint, intptr_t digit_index); + TNode LoadBigIntDigit(TNode bigint, + TNode digit_index); + + // Allocate a ByteArray with the given length. + TNode AllocateByteArray(TNode length, + AllocationFlags flags = kNone); - void StoreCellValue(Node* cell, Node* value, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + // Allocate a SeqOneByteString with the given length. 
+ TNode AllocateSeqOneByteString(uint32_t length, + AllocationFlags flags = kNone); + TNode AllocateSeqOneByteString(Node* context, TNode length, + AllocationFlags flags = kNone); + // Allocate a SeqTwoByteString with the given length. + TNode AllocateSeqTwoByteString(uint32_t length, + AllocationFlags flags = kNone); + TNode AllocateSeqTwoByteString(Node* context, TNode length, + AllocationFlags flags = kNone); + + // Allocate a SlicedOneByteString with the given length, parent and offset. + // |length| and |offset| are expected to be tagged. + + TNode AllocateSlicedOneByteString(TNode length, + TNode parent, + TNode offset); + // Allocate a SlicedTwoByteString with the given length, parent and offset. + // |length| and |offset| are expected to be tagged. + TNode AllocateSlicedTwoByteString(TNode length, + TNode parent, + TNode offset); + + // Allocate an appropriate one- or two-byte ConsString with the first and + // second parts specified by |left| and |right|. + TNode AllocateConsString(TNode length, TNode left, + TNode right); + + TNode AllocateNameDictionary(int at_least_space_for); + TNode AllocateNameDictionary( + TNode at_least_space_for, AllocationFlags = kNone); + TNode AllocateNameDictionaryWithCapacity( + TNode capacity, AllocationFlags = kNone); + TNode CopyNameDictionary(TNode dictionary, + Label* large_object_fallback); + + template + Node* AllocateOrderedHashTable(); + + // Builds code that finds OrderedHashTable entry for a key with hash code + // {hash} with using the comparison code generated by {key_compare}. The code + // jumps to {entry_found} if the key is found, or to {not_found} if the key + // was not found. In the {entry_found} branch, the variable + // entry_start_position will be bound to the index of the entry (relative to + // OrderedHashTable::kHashTableStartIndex). + // + // The {CollectionType} template parameter stands for the particular instance + // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. 
+ template + void FindOrderedHashTableEntry( + Node* table, Node* hash, + const std::function& key_compare, + Variable* entry_start_position, Label* entry_found, Label* not_found); + + template + TNode AllocateSmallOrderedHashTable(TNode capacity); + + Node* AllocateStruct(Node* map, AllocationFlags flags = kNone); + void InitializeStructBody(Node* object, Node* map, Node* size, + int start_offset = Struct::kHeaderSize); + + TNode AllocateJSObjectFromMap( + SloppyTNode map, SloppyTNode properties = nullptr, + SloppyTNode elements = nullptr, AllocationFlags flags = kNone, + SlackTrackingMode slack_tracking_mode = kNoSlackTracking); + + void InitializeJSObjectFromMap( + Node* object, Node* map, Node* instance_size, Node* properties = nullptr, + Node* elements = nullptr, + SlackTrackingMode slack_tracking_mode = kNoSlackTracking); + + void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map, + Node* instance_size); + void InitializeJSObjectBodyNoSlackTracking( + Node* object, Node* map, Node* instance_size, + int start_offset = JSObject::kHeaderSize); + + TNode IsValidFastJSArrayCapacity(Node* capacity, + ParameterMode capacity_mode); + + // + // Allocate and return a JSArray with initialized header fields and its + // uninitialized elements. + // The ParameterMode argument is only used for the capacity parameter. + std::pair, TNode> + AllocateUninitializedJSArrayWithElements( + ElementsKind kind, TNode array_map, TNode length, + Node* allocation_site, Node* capacity, + ParameterMode capacity_mode = INTPTR_PARAMETERS, + AllocationFlags allocation_flags = kNone, + int array_header_size = JSArray::kSize); + + // Allocate a JSArray and fill elements with the hole. + // The ParameterMode argument is only used for the capacity parameter. 
+ TNode AllocateJSArray( + ElementsKind kind, TNode array_map, Node* capacity, + TNode length, Node* allocation_site = nullptr, + ParameterMode capacity_mode = INTPTR_PARAMETERS, + AllocationFlags allocation_flags = kNone); + + TNode AllocateJSArray(ElementsKind kind, TNode array_map, + TNode capacity, TNode length) { + return AllocateJSArray(kind, array_map, capacity, length, nullptr, + SMI_PARAMETERS); + } - // Allocate a HeapNumber without initializing its value. - TNode AllocateHeapNumber(); - // Allocate a HeapNumber with a specific value. - TNode AllocateHeapNumberWithValue(SloppyTNode value); - TNode AllocateHeapNumberWithValue(double value) { - return AllocateHeapNumberWithValue(Float64Constant(value)); - } + TNode AllocateJSArray(ElementsKind kind, TNode array_map, + TNode capacity, TNode length, + AllocationFlags allocation_flags = kNone) { + return AllocateJSArray(kind, array_map, capacity, length, nullptr, + INTPTR_PARAMETERS, allocation_flags); + } - // Allocate a MutableHeapNumber with a specific value. - TNode AllocateMutableHeapNumberWithValue( - SloppyTNode value); - - // Allocate a BigInt with {length} digits. Sets the sign bit to {false}. - // Does not initialize the digits. - TNode AllocateBigInt(TNode length); - // Like above, but allowing custom bitfield initialization. - TNode AllocateRawBigInt(TNode length); - void StoreBigIntBitfield(TNode bigint, TNode bitfield); - void StoreBigIntDigit(TNode bigint, intptr_t digit_index, - TNode digit); - void StoreBigIntDigit(TNode bigint, TNode digit_index, - TNode digit); - - TNode LoadBigIntBitfield(TNode bigint); - TNode LoadBigIntDigit(TNode bigint, intptr_t digit_index); - TNode LoadBigIntDigit(TNode bigint, - TNode digit_index); - - // Allocate a ByteArray with the given length. - TNode AllocateByteArray(TNode length, - AllocationFlags flags = kNone); - - // Allocate a SeqOneByteString with the given length. 
- TNode AllocateSeqOneByteString(uint32_t length, - AllocationFlags flags = kNone); - TNode AllocateSeqOneByteString(Node* context, TNode length, - AllocationFlags flags = kNone); - // Allocate a SeqTwoByteString with the given length. - TNode AllocateSeqTwoByteString(uint32_t length, - AllocationFlags flags = kNone); - TNode AllocateSeqTwoByteString(Node* context, TNode length, - AllocationFlags flags = kNone); + // Allocate a JSArray and initialize the header fields. + TNode AllocateJSArray(TNode array_map, + TNode elements, + TNode length, + Node* allocation_site = nullptr, + int array_header_size = JSArray::kSize); + + enum class HoleConversionMode { kDontConvert, kConvertToUndefined }; + // Clone a fast JSArray |array| into a new fast JSArray. + // |convert_holes| tells the function to convert holes into undefined or not. + // If |convert_holes| is set to kConvertToUndefined, but the function did not + // find any hole in |array|, the resulting array will have the same elements + // kind as |array|. If the function did find a hole, it will convert holes in + // |array| to undefined in the resulting array, who will now have + // PACKED_ELEMENTS kind. + // If |convert_holes| is set kDontConvert, holes are also copied to the + // resulting array, who will have the same elements kind as |array|. The + // function generates significantly less code in this case. 
+ Node* CloneFastJSArray( + Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS, + Node* allocation_site = nullptr, + HoleConversionMode convert_holes = HoleConversionMode::kDontConvert); + + Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count, + ParameterMode mode = INTPTR_PARAMETERS, + Node* capacity = nullptr, + Node* allocation_site = nullptr); + + TNode AllocateFixedArray( + ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS, + AllocationFlags flags = kNone, + SloppyTNode fixed_array_map = nullptr); + + TNode AllocateFixedArray( + ElementsKind kind, TNode capacity, AllocationFlags flags, + SloppyTNode fixed_array_map = nullptr) { + return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags, + fixed_array_map); + } - // Allocate a SlicedOneByteString with the given length, parent and offset. - // |length| and |offset| are expected to be tagged. - - TNode AllocateSlicedOneByteString(TNode length, - TNode parent, - TNode offset); - // Allocate a SlicedTwoByteString with the given length, parent and offset. - // |length| and |offset| are expected to be tagged. - TNode AllocateSlicedTwoByteString(TNode length, - TNode parent, - TNode offset); - - // Allocate an appropriate one- or two-byte ConsString with the first and - // second parts specified by |left| and |right|. - TNode AllocateConsString(TNode length, TNode left, - TNode right); - - TNode AllocateNameDictionary(int at_least_space_for); - TNode AllocateNameDictionary( - TNode at_least_space_for, AllocationFlags = kNone); - TNode AllocateNameDictionaryWithCapacity( - TNode capacity, AllocationFlags = kNone); - TNode CopyNameDictionary(TNode dictionary, - Label* large_object_fallback); - - template - Node* AllocateOrderedHashTable(); - - // Builds code that finds OrderedHashTable entry for a key with hash code - // {hash} with using the comparison code generated by {key_compare}. 
The code - // jumps to {entry_found} if the key is found, or to {not_found} if the key - // was not found. In the {entry_found} branch, the variable - // entry_start_position will be bound to the index of the entry (relative to - // OrderedHashTable::kHashTableStartIndex). - // - // The {CollectionType} template parameter stands for the particular instance - // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. - template - void FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); - - template - TNode AllocateSmallOrderedHashTable(TNode capacity); - - Node* AllocateStruct(Node* map, AllocationFlags flags = kNone); - void InitializeStructBody(Node* object, Node* map, Node* size, - int start_offset = Struct::kHeaderSize); - - TNode AllocateJSObjectFromMap( - SloppyTNode map, SloppyTNode properties = nullptr, - SloppyTNode elements = nullptr, AllocationFlags flags = kNone, - SlackTrackingMode slack_tracking_mode = kNoSlackTracking); - - void InitializeJSObjectFromMap( - Node* object, Node* map, Node* instance_size, Node* properties = nullptr, - Node* elements = nullptr, - SlackTrackingMode slack_tracking_mode = kNoSlackTracking); - - void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map, - Node* instance_size); - void InitializeJSObjectBodyNoSlackTracking( - Node* object, Node* map, Node* instance_size, - int start_offset = JSObject::kHeaderSize); - - TNode IsValidFastJSArrayCapacity(Node* capacity, - ParameterMode capacity_mode); - - // - // Allocate and return a JSArray with initialized header fields and its - // uninitialized elements. - // The ParameterMode argument is only used for the capacity parameter. 
- std::pair, TNode> - AllocateUninitializedJSArrayWithElements( - ElementsKind kind, TNode array_map, TNode length, - Node* allocation_site, Node* capacity, - ParameterMode capacity_mode = INTPTR_PARAMETERS, - AllocationFlags allocation_flags = kNone, - int array_header_size = JSArray::kSize); - - // Allocate a JSArray and fill elements with the hole. - // The ParameterMode argument is only used for the capacity parameter. - TNode AllocateJSArray( - ElementsKind kind, TNode array_map, Node* capacity, - TNode length, Node* allocation_site = nullptr, - ParameterMode capacity_mode = INTPTR_PARAMETERS, - AllocationFlags allocation_flags = kNone); - - TNode AllocateJSArray(ElementsKind kind, TNode array_map, - TNode capacity, TNode length) { - return AllocateJSArray(kind, array_map, capacity, length, nullptr, - SMI_PARAMETERS); - } + TNode GetStructMap(InstanceType instance_type); - TNode AllocateJSArray(ElementsKind kind, TNode array_map, - TNode capacity, TNode length, - AllocationFlags allocation_flags = kNone) { - return AllocateJSArray(kind, array_map, capacity, length, nullptr, - INTPTR_PARAMETERS, allocation_flags); - } + TNode AllocateUninitializedFixedArray(intptr_t capacity) { + return UncheckedCast(AllocateFixedArray( + PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone)); + } - // Allocate a JSArray and initialize the header fields. - TNode AllocateJSArray(TNode array_map, - TNode elements, - TNode length, - Node* allocation_site = nullptr, - int array_header_size = JSArray::kSize); - - enum class HoleConversionMode { kDontConvert, kConvertToUndefined }; - // Clone a fast JSArray |array| into a new fast JSArray. - // |convert_holes| tells the function to convert holes into undefined or not. - // If |convert_holes| is set to kConvertToUndefined, but the function did not - // find any hole in |array|, the resulting array will have the same elements - // kind as |array|. 
If the function did find a hole, it will convert holes in - // |array| to undefined in the resulting array, who will now have - // PACKED_ELEMENTS kind. - // If |convert_holes| is set kDontConvert, holes are also copied to the - // resulting array, who will have the same elements kind as |array|. The - // function generates significantly less code in this case. - Node* CloneFastJSArray( - Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS, - Node* allocation_site = nullptr, - HoleConversionMode convert_holes = HoleConversionMode::kDontConvert); - - Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count, - ParameterMode mode = INTPTR_PARAMETERS, - Node* capacity = nullptr, - Node* allocation_site = nullptr); - - TNode AllocateFixedArray( - ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS, - AllocationFlags flags = kNone, - SloppyTNode fixed_array_map = nullptr); - - TNode AllocateFixedArray( - ElementsKind kind, TNode capacity, AllocationFlags flags, - SloppyTNode fixed_array_map = nullptr) { - return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags, - fixed_array_map); - } + TNode AllocateZeroedFixedArray(TNode capacity) { + TNode result = UncheckedCast( + AllocateFixedArray(PACKED_ELEMENTS, capacity, + AllocationFlag::kAllowLargeObjectAllocation)); + FillFixedArrayWithSmiZero(result, capacity); + return result; + } - TNode GetStructMap(InstanceType instance_type); + TNode AllocateZeroedFixedDoubleArray( + TNode capacity) { + TNode result = UncheckedCast( + AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, + AllocationFlag::kAllowLargeObjectAllocation)); + FillFixedDoubleArrayWithZero(result, capacity); + return result; + } - TNode AllocateUninitializedFixedArray(intptr_t capacity) { - return UncheckedCast(AllocateFixedArray( - PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone)); - } + TNode AllocateFixedArrayWithHoles(TNode capacity, + AllocationFlags flags) { + TNode 
result = UncheckedCast( + AllocateFixedArray(PACKED_ELEMENTS, capacity, flags)); + FillFixedArrayWithValue(PACKED_ELEMENTS, result, IntPtrConstant(0), + capacity, RootIndex::kTheHoleValue); + return result; + } - TNode AllocateZeroedFixedArray(TNode capacity) { - TNode result = UncheckedCast( - AllocateFixedArray(PACKED_ELEMENTS, capacity, - AllocationFlag::kAllowLargeObjectAllocation)); - FillFixedArrayWithSmiZero(result, capacity); - return result; - } + TNode AllocateFixedDoubleArrayWithHoles( + TNode capacity, AllocationFlags flags) { + TNode result = UncheckedCast( + AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, flags)); + FillFixedArrayWithValue(PACKED_DOUBLE_ELEMENTS, result, IntPtrConstant(0), + capacity, RootIndex::kTheHoleValue); + return result; + } - TNode AllocateZeroedFixedDoubleArray( - TNode capacity) { - TNode result = UncheckedCast( - AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, - AllocationFlag::kAllowLargeObjectAllocation)); - FillFixedDoubleArrayWithZero(result, capacity); - return result; - } + Node* AllocatePropertyArray(Node* capacity, + ParameterMode mode = INTPTR_PARAMETERS, + AllocationFlags flags = kNone); - TNode AllocateFixedArrayWithHoles(TNode capacity, - AllocationFlags flags) { - TNode result = UncheckedCast( - AllocateFixedArray(PACKED_ELEMENTS, capacity, flags)); - FillFixedArrayWithValue(PACKED_ELEMENTS, result, IntPtrConstant(0), - capacity, RootIndex::kTheHoleValue); - return result; - } + // Perform CreateArrayIterator (ES #sec-createarrayiterator). 
+ TNode CreateArrayIterator(TNode context, + TNode object, + IterationKind mode); - TNode AllocateFixedDoubleArrayWithHoles( - TNode capacity, AllocationFlags flags) { - TNode result = UncheckedCast( - AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, flags)); - FillFixedArrayWithValue(PACKED_DOUBLE_ELEMENTS, result, IntPtrConstant(0), - capacity, RootIndex::kTheHoleValue); - return result; - } + TNode AllocateJSIteratorResult(SloppyTNode context, + SloppyTNode value, + SloppyTNode done); + Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value); - Node* AllocatePropertyArray(Node* capacity, - ParameterMode mode = INTPTR_PARAMETERS, - AllocationFlags flags = kNone); - - // Perform CreateArrayIterator (ES #sec-createarrayiterator). - TNode CreateArrayIterator(TNode context, - TNode object, - IterationKind mode); - - TNode AllocateJSIteratorResult(SloppyTNode context, - SloppyTNode value, - SloppyTNode done); - Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value); - - TNode ArraySpeciesCreate(TNode context, - TNode originalArray, - TNode len); - - void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index, - Node* to_index, RootIndex value_root_index, - ParameterMode mode = INTPTR_PARAMETERS); - - // Uses memset to effectively initialize the given FixedArray with zeroes. - void FillFixedArrayWithSmiZero(TNode array, - TNode length); - void FillFixedDoubleArrayWithZero(TNode array, - TNode length); - - void FillPropertyArrayWithUndefined(Node* array, Node* from_index, - Node* to_index, - ParameterMode mode = INTPTR_PARAMETERS); - - enum class DestroySource { kNo, kYes }; - - // Specify DestroySource::kYes if {from_array} is being supplanted by - // {to_array}. This offers a slight performance benefit by simply copying the - // array word by word. The source may be destroyed at the end of this macro. 
- // - // Otherwise, specify DestroySource::kNo for operations where an Object is - // being cloned, to ensure that MutableHeapNumbers are unique between the - // source and cloned object. - void CopyPropertyArrayValues(Node* from_array, Node* to_array, Node* length, - WriteBarrierMode barrier_mode, - ParameterMode mode, - DestroySource destroy_source); - - // Copies all elements from |from_array| of |length| size to - // |to_array| of the same size respecting the elements kind. - void CopyFixedArrayElements( - ElementsKind kind, Node* from_array, Node* to_array, Node* length, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - ParameterMode mode = INTPTR_PARAMETERS) { - CopyFixedArrayElements(kind, from_array, kind, to_array, - IntPtrOrSmiConstant(0, mode), length, length, - barrier_mode, mode); - } + TNode ArraySpeciesCreate(TNode context, + TNode originalArray, + TNode len); - // Copies |element_count| elements from |from_array| starting from element - // zero to |to_array| of |capacity| size respecting both array's elements - // kinds. - void CopyFixedArrayElements( - ElementsKind from_kind, Node* from_array, ElementsKind to_kind, - Node* to_array, Node* element_count, Node* capacity, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - ParameterMode mode = INTPTR_PARAMETERS) { - CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, - IntPtrOrSmiConstant(0, mode), element_count, - capacity, barrier_mode, mode); - } + void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index, + Node* to_index, RootIndex value_root_index, + ParameterMode mode = INTPTR_PARAMETERS); - // Copies |element_count| elements from |from_array| starting from element - // |first_element| to |to_array| of |capacity| size respecting both array's - // elements kinds. - // |convert_holes| tells the function whether to convert holes to undefined. - // |var_holes_converted| can be used to signify that the conversion happened - // (i.e. 
that there were holes). If |convert_holes_to_undefined| is - // HoleConversionMode::kConvertToUndefined, then it must not be the case that - // IsDoubleElementsKind(to_kind). - void CopyFixedArrayElements( - ElementsKind from_kind, Node* from_array, ElementsKind to_kind, - Node* to_array, Node* first_element, Node* element_count, Node* capacity, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - ParameterMode mode = INTPTR_PARAMETERS, - HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, - TVariable* var_holes_converted = nullptr); - - void CopyFixedArrayElements( - ElementsKind from_kind, TNode from_array, - ElementsKind to_kind, TNode to_array, - TNode first_element, TNode element_count, TNode capacity, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { - CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, - first_element, element_count, capacity, barrier_mode, - SMI_PARAMETERS); - } + // Uses memset to effectively initialize the given FixedArray with zeroes. + void FillFixedArrayWithSmiZero(TNode array, + TNode length); + void FillFixedDoubleArrayWithZero(TNode array, + TNode length); + + void FillPropertyArrayWithUndefined(Node* array, Node* from_index, + Node* to_index, + ParameterMode mode = INTPTR_PARAMETERS); + + enum class DestroySource { kNo, kYes }; + + // Specify DestroySource::kYes if {from_array} is being supplanted by + // {to_array}. This offers a slight performance benefit by simply copying the + // array word by word. The source may be destroyed at the end of this macro. + // + // Otherwise, specify DestroySource::kNo for operations where an Object is + // being cloned, to ensure that MutableHeapNumbers are unique between the + // source and cloned object. 
+ void CopyPropertyArrayValues(Node* from_array, Node* to_array, Node* length, + WriteBarrierMode barrier_mode, + ParameterMode mode, + DestroySource destroy_source); + + // Copies all elements from |from_array| of |length| size to + // |to_array| of the same size respecting the elements kind. + void CopyFixedArrayElements( + ElementsKind kind, Node* from_array, Node* to_array, Node* length, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + ParameterMode mode = INTPTR_PARAMETERS) { + CopyFixedArrayElements(kind, from_array, kind, to_array, + IntPtrOrSmiConstant(0, mode), length, length, + barrier_mode, mode); + } - void JumpIfPointersFromHereAreInteresting(TNode object, - Label* interesting); - - // Efficiently copy elements within a single array. The regions - // [src_index, src_index + length) and [dst_index, dst_index + length) - // can be overlapping. - void MoveElements(ElementsKind kind, TNode elements, - TNode dst_index, TNode src_index, - TNode length); - - // Efficiently copy elements from one array to another. The ElementsKind - // needs to be the same. Copy from src_elements at - // [src_index, src_index + length) to dst_elements at - // [dst_index, dst_index + length). - // The function decides whether it can use memcpy. In case it cannot, - // |write_barrier| can help it to skip write barrier. SKIP_WRITE_BARRIER is - // only safe when copying to new space, or when copying to old space and the - // array does not contain object pointers. 
- void CopyElements(ElementsKind kind, TNode dst_elements, - TNode dst_index, - TNode src_elements, - TNode src_index, TNode length, - WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER); - - TNode HeapObjectToFixedArray(TNode base, - Label* cast_fail); - - TNode HeapObjectToFixedDoubleArray(TNode base, - Label* cast_fail) { - GotoIf( - WordNotEqual(LoadMap(base), LoadRoot(RootIndex::kFixedDoubleArrayMap)), - cast_fail); - return UncheckedCast(base); - } + // Copies |element_count| elements from |from_array| starting from element + // zero to |to_array| of |capacity| size respecting both array's elements + // kinds. + void CopyFixedArrayElements( + ElementsKind from_kind, Node* from_array, ElementsKind to_kind, + Node* to_array, Node* element_count, Node* capacity, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + ParameterMode mode = INTPTR_PARAMETERS) { + CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, + IntPtrOrSmiConstant(0, mode), element_count, + capacity, barrier_mode, mode); + } - TNode HeapObjectToSloppyArgumentsElements( - TNode base, Label* cast_fail) { - GotoIf(WordNotEqual(LoadMap(base), - LoadRoot(RootIndex::kSloppyArgumentsElementsMap)), - cast_fail); - return UncheckedCast(base); - } + // Copies |element_count| elements from |from_array| starting from element + // |first_element| to |to_array| of |capacity| size respecting both array's + // elements kinds. + // |convert_holes| tells the function whether to convert holes to undefined. + // |var_holes_converted| can be used to signify that the conversion happened + // (i.e. that there were holes). If |convert_holes_to_undefined| is + // HoleConversionMode::kConvertToUndefined, then it must not be the case that + // IsDoubleElementsKind(to_kind). 
+ void CopyFixedArrayElements( + ElementsKind from_kind, Node* from_array, ElementsKind to_kind, + Node* to_array, Node* first_element, Node* element_count, Node* capacity, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, + ParameterMode mode = INTPTR_PARAMETERS, + HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, + TVariable* var_holes_converted = nullptr); + + void CopyFixedArrayElements( + ElementsKind from_kind, TNode from_array, + ElementsKind to_kind, TNode to_array, + TNode first_element, TNode element_count, TNode capacity, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { + CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, + first_element, element_count, capacity, barrier_mode, + SMI_PARAMETERS); + } - TNode ConvertElementsKindToInt(TNode elements_kind) { - return UncheckedCast(elements_kind); - } + void JumpIfPointersFromHereAreInteresting(TNode object, + Label* interesting); + + // Efficiently copy elements within a single array. The regions + // [src_index, src_index + length) and [dst_index, dst_index + length) + // can be overlapping. + void MoveElements(ElementsKind kind, TNode elements, + TNode dst_index, TNode src_index, + TNode length); + + // Efficiently copy elements from one array to another. The ElementsKind + // needs to be the same. Copy from src_elements at + // [src_index, src_index + length) to dst_elements at + // [dst_index, dst_index + length). + // The function decides whether it can use memcpy. In case it cannot, + // |write_barrier| can help it to skip write barrier. SKIP_WRITE_BARRIER is + // only safe when copying to new space, or when copying to old space and the + // array does not contain object pointers. 
+ void CopyElements(ElementsKind kind, TNode dst_elements, + TNode dst_index, + TNode src_elements, + TNode src_index, TNode length, + WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER); + + TNode HeapObjectToFixedArray(TNode base, + Label* cast_fail); + + TNode HeapObjectToFixedDoubleArray(TNode base, + Label* cast_fail) { + GotoIf( + WordNotEqual(LoadMap(base), LoadRoot(RootIndex::kFixedDoubleArrayMap)), + cast_fail); + return UncheckedCast(base); + } - enum class ExtractFixedArrayFlag { - kFixedArrays = 1, - kFixedDoubleArrays = 2, - kDontCopyCOW = 4, - kNewSpaceAllocationOnly = 8, - kAllFixedArrays = kFixedArrays | kFixedDoubleArrays, - kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW - }; - - using ExtractFixedArrayFlags = base::Flags; - - // Copy a portion of an existing FixedArray or FixedDoubleArray into a new - // array, including special appropriate handling for empty arrays and COW - // arrays. The result array will be of the same type as the original array. - // - // * |source| is either a FixedArray or FixedDoubleArray from which to copy - // elements. - // * |first| is the starting element index to copy from, if nullptr is passed - // then index zero is used by default. - // * |count| is the number of elements to copy out of the source array - // starting from and including the element indexed by |start|. If |count| is - // nullptr, then all of the elements from |start| to the end of |source| are - // copied. - // * |capacity| determines the size of the allocated result array, with - // |capacity| >= |count|. If |capacity| is nullptr, then |count| is used as - // the destination array's capacity. - // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both - // are detected and copied. Although it's always correct to pass - // kAllFixedArrays, the generated code is more compact and efficient if the - // caller can specify whether only FixedArrays or FixedDoubleArrays will be - // passed as the |source| parameter. 
- // * |parameter_mode| determines the parameter mode of |first|, |count| and - // |capacity|. - // * If |var_holes_converted| is given, any holes will be converted to - // undefined and the variable will be set according to whether or not there - // were any hole. - // * If |source_elements_kind| is given, the function will try to use the - // runtime elements kind of source to make copy faster. More specifically, it - // can skip write barriers. - TNode ExtractFixedArray( - Node* source, Node* first, Node* count = nullptr, - Node* capacity = nullptr, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - TVariable* var_holes_converted = nullptr, - Node* source_elements_kind = nullptr); - - TNode ExtractFixedArray( - TNode source, TNode first, TNode count, - TNode capacity, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays) { - return ExtractFixedArray(source, first, count, capacity, extract_flags, - SMI_PARAMETERS); - } + TNode HeapObjectToSloppyArgumentsElements( + TNode base, Label* cast_fail) { + GotoIf(WordNotEqual(LoadMap(base), + LoadRoot(RootIndex::kSloppyArgumentsElementsMap)), + cast_fail); + return UncheckedCast(base); + } - TNode ExtractFixedArray( - TNode source, TNode first, TNode count, - TNode capacity, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays) { - return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags, - INTPTR_PARAMETERS)); - } + TNode ConvertElementsKindToInt(TNode elements_kind) { + return UncheckedCast(elements_kind); + } - // Copy a portion of an existing FixedArray or FixedDoubleArray into a new - // FixedArray, including special appropriate handling for COW arrays. - // * |source| is either a FixedArray or FixedDoubleArray from which to copy - // elements. |source| is assumed to be non-empty. - // * |first| is the starting element index to copy from. 
- // * |count| is the number of elements to copy out of the source array - // starting from and including the element indexed by |start|. - // * |capacity| determines the size of the allocated result array, with - // |capacity| >= |count|. - // * |source_map| is the map of the |source|. - // * |from_kind| is the elements kind that is consistent with |source| being - // a FixedArray or FixedDoubleArray. This function only cares about double vs. - // non-double, so as to distinguish FixedDoubleArray vs. FixedArray. It does - // not care about holeyness. For example, when |source| is a FixedArray, - // PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS. - // * |allocation_flags| and |extract_flags| influence how the target - // FixedArray is allocated. - // * |parameter_mode| determines the parameter mode of |first|, |count| and - // |capacity|. - // * |convert_holes| is used to signify that the target array should use - // undefined in places of holes. - // * If |convert_holes| is true and |var_holes_converted| not nullptr, then - // |var_holes_converted| is used to signal whether any holes were found and - // converted. The caller should use this information to decide which map is - // compatible with the result array. For example, if the input was of - // HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be - // compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS. - TNode ExtractToFixedArray( - Node* source, Node* first, Node* count, Node* capacity, Node* source_map, - ElementsKind from_kind = PACKED_ELEMENTS, - AllocationFlags allocation_flags = AllocationFlag::kNone, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, - TVariable* var_holes_converted = nullptr, - Node* source_runtime_kind = nullptr); - - // Attempt to copy a FixedDoubleArray to another FixedDoubleArray. 
In the case - // where the source array has a hole, produce a FixedArray instead where holes - // are replaced with undefined. - // * |source| is a FixedDoubleArray from which to copy elements. - // * |first| is the starting element index to copy from. - // * |count| is the number of elements to copy out of the source array - // starting from and including the element indexed by |start|. - // * |capacity| determines the size of the allocated result array, with - // |capacity| >= |count|. - // * |source_map| is the map of |source|. It will be used as the map of the - // target array if the target can stay a FixedDoubleArray. Otherwise if the - // target array needs to be a FixedArray, the FixedArrayMap will be used. - // * |var_holes_converted| is used to signal whether a FixedAray - // is produced or not. - // * |allocation_flags| and |extract_flags| influence how the target array is - // allocated. - // * |parameter_mode| determines the parameter mode of |first|, |count| and - // |capacity|. - TNode ExtractFixedDoubleArrayFillingHoles( - Node* source, Node* first, Node* count, Node* capacity, Node* source_map, - TVariable* var_holes_converted, AllocationFlags allocation_flags, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - // Copy the entire contents of a FixedArray or FixedDoubleArray to a new - // array, including special appropriate handling for empty arrays and COW - // arrays. - // - // * |source| is either a FixedArray or FixedDoubleArray from which to copy - // elements. - // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both - // are detected and copied. Although it's always correct to pass - // kAllFixedArrays, the generated code is more compact and efficient if the - // caller can specify whether only FixedArrays or FixedDoubleArrays will be - // passed as the |source| parameter. 
- Node* CloneFixedArray(Node* source, - ExtractFixedArrayFlags flags = - ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { - ParameterMode mode = OptimalParameterMode(); - return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr, - nullptr, flags, mode); - } + enum class ExtractFixedArrayFlag { + kFixedArrays = 1, + kFixedDoubleArrays = 2, + kDontCopyCOW = 4, + kNewSpaceAllocationOnly = 8, + kAllFixedArrays = kFixedArrays | kFixedDoubleArrays, + kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW + }; + + using ExtractFixedArrayFlags = base::Flags; + + // Copy a portion of an existing FixedArray or FixedDoubleArray into a new + // array, including special appropriate handling for empty arrays and COW + // arrays. The result array will be of the same type as the original array. + // + // * |source| is either a FixedArray or FixedDoubleArray from which to copy + // elements. + // * |first| is the starting element index to copy from, if nullptr is passed + // then index zero is used by default. + // * |count| is the number of elements to copy out of the source array + // starting from and including the element indexed by |start|. If |count| is + // nullptr, then all of the elements from |start| to the end of |source| are + // copied. + // * |capacity| determines the size of the allocated result array, with + // |capacity| >= |count|. If |capacity| is nullptr, then |count| is used as + // the destination array's capacity. + // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both + // are detected and copied. Although it's always correct to pass + // kAllFixedArrays, the generated code is more compact and efficient if the + // caller can specify whether only FixedArrays or FixedDoubleArrays will be + // passed as the |source| parameter. + // * |parameter_mode| determines the parameter mode of |first|, |count| and + // |capacity|. 
+ // * If |var_holes_converted| is given, any holes will be converted to + // undefined and the variable will be set according to whether or not there + // were any hole. + // * If |source_elements_kind| is given, the function will try to use the + // runtime elements kind of source to make copy faster. More specifically, it + // can skip write barriers. + TNode ExtractFixedArray( + Node* source, Node* first, Node* count = nullptr, + Node* capacity = nullptr, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + TVariable* var_holes_converted = nullptr, + Node* source_elements_kind = nullptr); + + TNode ExtractFixedArray( + TNode source, TNode first, TNode count, + TNode capacity, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays) { + return ExtractFixedArray(source, first, count, capacity, extract_flags, + SMI_PARAMETERS); + } - // Copies |character_count| elements from |from_string| to |to_string| - // starting at the |from_index|'th character. |from_string| and |to_string| - // can either be one-byte strings or two-byte strings, although if - // |from_string| is two-byte, then |to_string| must be two-byte. - // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <= - // |from_index| <= |from_index| + |character_count| <= from_string.length and - // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length. - void CopyStringCharacters(Node* from_string, Node* to_string, - TNode from_index, TNode to_index, - TNode character_count, - String::Encoding from_encoding, - String::Encoding to_encoding); - - // Loads an element from |array| of |from_kind| elements by given |offset| - // (NOTE: not index!), does a hole check if |if_hole| is provided and - // converts the value so that it becomes ready for storing to array of - // |to_kind| elements. 
- Node* LoadElementAndPrepareForStore(Node* array, Node* offset, - ElementsKind from_kind, - ElementsKind to_kind, Label* if_hole); - - Node* CalculateNewElementsCapacity(Node* old_capacity, - ParameterMode mode = INTPTR_PARAMETERS); - - TNode CalculateNewElementsCapacity(TNode old_capacity) { - return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS)); - } - TNode CalculateNewElementsCapacity(TNode old_capacity) { - return UncheckedCast( - CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS)); - } + TNode ExtractFixedArray( + TNode source, TNode first, TNode count, + TNode capacity, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays) { + return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags, + INTPTR_PARAMETERS)); + } - // Tries to grow the |elements| array of given |object| to store the |key| - // or bails out if the growing gap is too big. Returns new elements. - Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, - Node* key, Label* bailout); - - // Tries to grow the |capacity|-length |elements| array of given |object| - // to store the |key| or bails out if the growing gap is too big. Returns - // new elements. - Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, - Node* key, Node* capacity, ParameterMode mode, - Label* bailout); - - // Grows elements capacity of given object. Returns new elements. - Node* GrowElementsCapacity(Node* object, Node* elements, - ElementsKind from_kind, ElementsKind to_kind, - Node* capacity, Node* new_capacity, - ParameterMode mode, Label* bailout); - - // Given a need to grow by |growth|, allocate an appropriate new capacity - // if necessary, and return a new elements FixedArray object. Label |bailout| - // is followed for allocation failure. 
- void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind, - Node* array, Node* length, - Variable* var_elements, Node* growth, - Label* bailout); - - // Allocation site manipulation - void InitializeAllocationMemento(Node* base_allocation, - Node* base_allocation_size, - Node* allocation_site); - - Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber); - Node* TruncateTaggedToFloat64(Node* context, Node* value); - Node* TruncateTaggedToWord32(Node* context, Node* value); - void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number, - Variable* var_word32, Label* if_bigint, - Variable* var_bigint); - void TaggedToWord32OrBigIntWithFeedback( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback); - - // Truncate the floating point value of a HeapNumber to an Int32. - TNode TruncateHeapNumberValueToWord32(TNode object); - - // Conversions. - void TryHeapNumberToSmi(TNode number, - TVariable& output, // NOLINT(runtime/references) - Label* if_smi); - void TryFloat64ToSmi(TNode number, - TVariable& output, // NOLINT(runtime/references) - Label* if_smi); - TNode ChangeFloat64ToTagged(SloppyTNode value); - TNode ChangeInt32ToTagged(SloppyTNode value); - TNode ChangeUint32ToTagged(SloppyTNode value); - TNode ChangeUintPtrToTagged(TNode value); - TNode ChangeNumberToUint32(TNode value); - TNode ChangeNumberToFloat64(TNode value); - TNode TryNumberToUintPtr(TNode value, Label* if_negative); - TNode ChangeNonnegativeNumberToUintPtr(TNode value) { - return TryNumberToUintPtr(value, nullptr); - } + // Copy a portion of an existing FixedArray or FixedDoubleArray into a new + // FixedArray, including special appropriate handling for COW arrays. + // * |source| is either a FixedArray or FixedDoubleArray from which to copy + // elements. |source| is assumed to be non-empty. + // * |first| is the starting element index to copy from. 
+ // * |count| is the number of elements to copy out of the source array + // starting from and including the element indexed by |start|. + // * |capacity| determines the size of the allocated result array, with + // |capacity| >= |count|. + // * |source_map| is the map of the |source|. + // * |from_kind| is the elements kind that is consistent with |source| being + // a FixedArray or FixedDoubleArray. This function only cares about double vs. + // non-double, so as to distinguish FixedDoubleArray vs. FixedArray. It does + // not care about holeyness. For example, when |source| is a FixedArray, + // PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS. + // * |allocation_flags| and |extract_flags| influence how the target + // FixedArray is allocated. + // * |parameter_mode| determines the parameter mode of |first|, |count| and + // |capacity|. + // * |convert_holes| is used to signify that the target array should use + // undefined in places of holes. + // * If |convert_holes| is true and |var_holes_converted| not nullptr, then + // |var_holes_converted| is used to signal whether any holes were found and + // converted. The caller should use this information to decide which map is + // compatible with the result array. For example, if the input was of + // HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be + // compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS. + TNode ExtractToFixedArray( + Node* source, Node* first, Node* count, Node* capacity, Node* source_map, + ElementsKind from_kind = PACKED_ELEMENTS, + AllocationFlags allocation_flags = AllocationFlag::kNone, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays, + ParameterMode parameter_mode = INTPTR_PARAMETERS, + HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, + TVariable* var_holes_converted = nullptr, + Node* source_runtime_kind = nullptr); + + // Attempt to copy a FixedDoubleArray to another FixedDoubleArray. 
In the case + // where the source array has a hole, produce a FixedArray instead where holes + // are replaced with undefined. + // * |source| is a FixedDoubleArray from which to copy elements. + // * |first| is the starting element index to copy from. + // * |count| is the number of elements to copy out of the source array + // starting from and including the element indexed by |start|. + // * |capacity| determines the size of the allocated result array, with + // |capacity| >= |count|. + // * |source_map| is the map of |source|. It will be used as the map of the + // target array if the target can stay a FixedDoubleArray. Otherwise if the + // target array needs to be a FixedArray, the FixedArrayMap will be used. + // * |var_holes_converted| is used to signal whether a FixedAray + // is produced or not. + // * |allocation_flags| and |extract_flags| influence how the target array is + // allocated. + // * |parameter_mode| determines the parameter mode of |first|, |count| and + // |capacity|. + TNode ExtractFixedDoubleArrayFillingHoles( + Node* source, Node* first, Node* count, Node* capacity, Node* source_map, + TVariable* var_holes_converted, AllocationFlags allocation_flags, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + // Copy the entire contents of a FixedArray or FixedDoubleArray to a new + // array, including special appropriate handling for empty arrays and COW + // arrays. + // + // * |source| is either a FixedArray or FixedDoubleArray from which to copy + // elements. + // * |extract_flags| determines whether FixedArrays, FixedDoubleArrays or both + // are detected and copied. Although it's always correct to pass + // kAllFixedArrays, the generated code is more compact and efficient if the + // caller can specify whether only FixedArrays or FixedDoubleArrays will be + // passed as the |source| parameter. 
+ Node* CloneFixedArray(Node* source, + ExtractFixedArrayFlags flags = + ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { + ParameterMode mode = OptimalParameterMode(); + return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr, + nullptr, flags, mode); + } - void TaggedToNumeric(Node* context, Node* value, Label* done, - Variable* var_numeric); - void TaggedToNumericWithFeedback(Node* context, Node* value, Label* done, - Variable* var_numeric, - Variable* var_feedback); + // Copies |character_count| elements from |from_string| to |to_string| + // starting at the |from_index|'th character. |from_string| and |to_string| + // can either be one-byte strings or two-byte strings, although if + // |from_string| is two-byte, then |to_string| must be two-byte. + // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <= + // |from_index| <= |from_index| + |character_count| <= from_string.length and + // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length. + void CopyStringCharacters(Node* from_string, Node* to_string, + TNode from_index, TNode to_index, + TNode character_count, + String::Encoding from_encoding, + String::Encoding to_encoding); + + // Loads an element from |array| of |from_kind| elements by given |offset| + // (NOTE: not index!), does a hole check if |if_hole| is provided and + // converts the value so that it becomes ready for storing to array of + // |to_kind| elements. 
+ Node* LoadElementAndPrepareForStore(Node* array, Node* offset, + ElementsKind from_kind, + ElementsKind to_kind, Label* if_hole); + + Node* CalculateNewElementsCapacity(Node* old_capacity, + ParameterMode mode = INTPTR_PARAMETERS); + + TNode CalculateNewElementsCapacity(TNode old_capacity) { + return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS)); + } + TNode CalculateNewElementsCapacity(TNode old_capacity) { + return UncheckedCast( + CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS)); + } - TNode TimesSystemPointerSize(SloppyTNode value); - TNode TimesSystemPointerSize(TNode value) { - return Signed(TimesSystemPointerSize(implicit_cast>(value))); - } - TNode TimesSystemPointerSize(TNode value) { - return Unsigned(TimesSystemPointerSize(implicit_cast>(value))); - } + // Tries to grow the |elements| array of given |object| to store the |key| + // or bails out if the growing gap is too big. Returns new elements. + Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, + Node* key, Label* bailout); + + // Tries to grow the |capacity|-length |elements| array of given |object| + // to store the |key| or bails out if the growing gap is too big. Returns + // new elements. + Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, + Node* key, Node* capacity, ParameterMode mode, + Label* bailout); + + // Grows elements capacity of given object. Returns new elements. + Node* GrowElementsCapacity(Node* object, Node* elements, + ElementsKind from_kind, ElementsKind to_kind, + Node* capacity, Node* new_capacity, + ParameterMode mode, Label* bailout); + + // Given a need to grow by |growth|, allocate an appropriate new capacity + // if necessary, and return a new elements FixedArray object. Label |bailout| + // is followed for allocation failure. 
+ void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind, + Node* array, Node* length, + Variable* var_elements, Node* growth, + Label* bailout); + + // Allocation site manipulation + void InitializeAllocationMemento(Node* base_allocation, + Node* base_allocation_size, + Node* allocation_site); + + Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber); + Node* TruncateTaggedToFloat64(Node* context, Node* value); + Node* TruncateTaggedToWord32(Node* context, Node* value); + void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number, + Variable* var_word32, Label* if_bigint, + Variable* var_bigint); + void TaggedToWord32OrBigIntWithFeedback( + Node* context, Node* value, Label* if_number, Variable* var_word32, + Label* if_bigint, Variable* var_bigint, Variable* var_feedback); + + // Truncate the floating point value of a HeapNumber to an Int32. + TNode TruncateHeapNumberValueToWord32(TNode object); + + // Conversions. + void TryHeapNumberToSmi(TNode number, + TVariable& output, // NOLINT(runtime/references) + Label* if_smi); + void TryFloat64ToSmi(TNode number, + TVariable& output, // NOLINT(runtime/references) + Label* if_smi); + TNode ChangeFloat64ToTagged(SloppyTNode value); + TNode ChangeInt32ToTagged(SloppyTNode value); + TNode ChangeUint32ToTagged(SloppyTNode value); + TNode ChangeUintPtrToTagged(TNode value); + TNode ChangeNumberToUint32(TNode value); + TNode ChangeNumberToFloat64(TNode value); + TNode TryNumberToUintPtr(TNode value, Label* if_negative); + TNode ChangeNonnegativeNumberToUintPtr(TNode value) { + return TryNumberToUintPtr(value, nullptr); + } - TNode TimesTaggedSize(SloppyTNode value); - TNode TimesTaggedSize(TNode value) { - return Signed(TimesTaggedSize(implicit_cast>(value))); - } - TNode TimesTaggedSize(TNode value) { - return Unsigned(TimesTaggedSize(implicit_cast>(value))); - } + void TaggedToNumeric(Node* context, Node* value, Label* done, + Variable* var_numeric); + void 
TaggedToNumericWithFeedback(Node* context, Node* value, Label* done, + Variable* var_numeric, + Variable* var_feedback); - TNode TimesDoubleSize(SloppyTNode value); - TNode TimesDoubleSize(TNode value) { - return Unsigned(TimesDoubleSize(implicit_cast>(value))); - } - TNode TimesDoubleSize(TNode value) { - return Signed(TimesDoubleSize(implicit_cast>(value))); - } + TNode TimesSystemPointerSize(SloppyTNode value); + TNode TimesSystemPointerSize(TNode value) { + return Signed(TimesSystemPointerSize(implicit_cast>(value))); + } + TNode TimesSystemPointerSize(TNode value) { + return Unsigned(TimesSystemPointerSize(implicit_cast>(value))); + } - // Type conversions. - // Throws a TypeError for {method_name} if {value} is not coercible to Object, - // or returns the {value} converted to a String otherwise. - TNode ToThisString(TNode context, TNode value, - TNode method_name); - TNode ToThisString(TNode context, TNode value, - char const* method_name) { - return ToThisString(context, value, StringConstant(method_name)); - } + TNode TimesTaggedSize(SloppyTNode value); + TNode TimesTaggedSize(TNode value) { + return Signed(TimesTaggedSize(implicit_cast>(value))); + } + TNode TimesTaggedSize(TNode value) { + return Unsigned(TimesTaggedSize(implicit_cast>(value))); + } - // Throws a TypeError for {method_name} if {value} is neither of the given - // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of - // {primitive_type}, or returns the {value} (or wrapped value) otherwise. - TNode ToThisValue(TNode context, TNode value, - PrimitiveType primitive_type, - char const* method_name); - - // Throws a TypeError for {method_name} if {value} is not of the given - // instance type. Returns {value}'s map. - Node* ThrowIfNotInstanceType(Node* context, Node* value, - InstanceType instance_type, - char const* method_name); - // Throws a TypeError for {method_name} if {value} is not a JSReceiver. - // Returns the {value}'s map. 
- Node* ThrowIfNotJSReceiver(Node* context, Node* value, - MessageTemplate msg_template, - const char* method_name = nullptr); - void ThrowIfNotCallable(TNode context, TNode value, - const char* method_name); - - void ThrowRangeError(Node* context, MessageTemplate message, - Node* arg0 = nullptr, Node* arg1 = nullptr, - Node* arg2 = nullptr); - void ThrowTypeError(Node* context, MessageTemplate message, - char const* arg0 = nullptr, char const* arg1 = nullptr); - void ThrowTypeError(Node* context, MessageTemplate message, Node* arg0, - Node* arg1 = nullptr, Node* arg2 = nullptr); - - // Type checks. - // Check whether the map is for an object with special properties, such as a - // JSProxy or an object with interceptors. - TNode InstanceTypeEqual(SloppyTNode instance_type, int type); - TNode IsAccessorInfo(SloppyTNode object); - TNode IsAccessorPair(SloppyTNode object); - TNode IsAllocationSite(SloppyTNode object); - TNode IsAnyHeapNumber(SloppyTNode object); - TNode IsNoElementsProtectorCellInvalid(); - TNode IsArrayIteratorProtectorCellInvalid(); - TNode IsBigIntInstanceType(SloppyTNode instance_type); - TNode IsBigInt(SloppyTNode object); - TNode IsBoolean(SloppyTNode object); - TNode IsCallableMap(SloppyTNode map); - TNode IsCallable(SloppyTNode object); - TNode TaggedIsCallable(TNode object); - TNode IsCell(SloppyTNode object); - TNode IsCode(SloppyTNode object); - TNode IsConsStringInstanceType(SloppyTNode instance_type); - TNode IsConstructorMap(SloppyTNode map); - TNode IsConstructor(SloppyTNode object); - TNode IsDebugInfo(TNode object); - TNode IsDeprecatedMap(SloppyTNode map); - TNode IsNameDictionary(SloppyTNode object); - TNode IsGlobalDictionary(SloppyTNode object); - TNode IsExtensibleMap(SloppyTNode map); - TNode IsFrozenOrSealedElementsKindMap(SloppyTNode map); - TNode IsExtensibleNonPrototypeMap(TNode map); - TNode IsExternalStringInstanceType(SloppyTNode instance_type); - TNode IsFeedbackCell(SloppyTNode object); - TNode 
IsFeedbackVector(SloppyTNode object); - TNode IsContext(SloppyTNode object); - TNode IsFixedArray(SloppyTNode object); - TNode IsFixedArraySubclass(SloppyTNode object); - TNode IsFixedArrayWithKind(SloppyTNode object, - ElementsKind kind); - TNode IsFixedArrayWithKindOrEmpty(SloppyTNode object, - ElementsKind kind); - TNode IsFixedDoubleArray(SloppyTNode object); - TNode IsFunctionWithPrototypeSlotMap(SloppyTNode map); - TNode IsHashTable(SloppyTNode object); - TNode IsEphemeronHashTable(SloppyTNode object); - TNode IsHeapNumber(SloppyTNode object); - TNode IsHeapNumberInstanceType(SloppyTNode instance_type); - TNode IsOddball(SloppyTNode object); - TNode IsOddballInstanceType(SloppyTNode instance_type); - TNode IsIndirectStringInstanceType(SloppyTNode instance_type); - TNode IsJSArrayBuffer(SloppyTNode object); - TNode IsJSDataView(TNode object); - TNode IsJSArrayInstanceType(SloppyTNode instance_type); - TNode IsJSArrayMap(SloppyTNode map); - TNode IsJSArray(SloppyTNode object); - TNode IsJSArrayIterator(SloppyTNode object); - TNode IsJSAsyncGeneratorObject(SloppyTNode object); - TNode IsJSFunctionInstanceType(SloppyTNode instance_type); - TNode IsAllocationSiteInstanceType(SloppyTNode instance_type); - TNode IsJSFunctionMap(SloppyTNode map); - TNode IsJSFunction(SloppyTNode object); - TNode IsJSGeneratorObject(SloppyTNode object); - TNode IsJSGlobalProxyInstanceType(SloppyTNode instance_type); - TNode IsJSGlobalProxyMap(SloppyTNode map); - TNode IsJSGlobalProxy(SloppyTNode object); - TNode IsJSObjectInstanceType(SloppyTNode instance_type); - TNode IsJSObjectMap(SloppyTNode map); - TNode IsJSObject(SloppyTNode object); - TNode IsJSPromiseMap(SloppyTNode map); - TNode IsJSPromise(SloppyTNode object); - TNode IsJSProxy(SloppyTNode object); - TNode IsJSStringIterator(SloppyTNode object); - TNode IsJSReceiverInstanceType(SloppyTNode instance_type); - TNode IsJSReceiverMap(SloppyTNode map); - TNode IsJSReceiver(SloppyTNode object); - TNode IsJSRegExp(SloppyTNode 
object); - TNode IsJSTypedArrayInstanceType(SloppyTNode instance_type); - TNode IsJSTypedArrayMap(SloppyTNode map); - TNode IsJSTypedArray(SloppyTNode object); - TNode IsJSPrimitiveWrapperInstanceType( - SloppyTNode instance_type); - TNode IsJSPrimitiveWrapperMap(SloppyTNode map); - TNode IsJSPrimitiveWrapper(SloppyTNode object); - TNode IsMap(SloppyTNode object); - TNode IsMutableHeapNumber(SloppyTNode object); - TNode IsName(SloppyTNode object); - TNode IsNameInstanceType(SloppyTNode instance_type); - TNode IsNativeContext(SloppyTNode object); - TNode IsNullOrJSReceiver(SloppyTNode object); - TNode IsNullOrUndefined(SloppyTNode object); - TNode IsNumberDictionary(SloppyTNode object); - TNode IsOneByteStringInstanceType(SloppyTNode instance_type); - TNode IsPrimitiveInstanceType(SloppyTNode instance_type); - TNode IsPrivateSymbol(SloppyTNode object); - TNode IsPrivateName(SloppyTNode symbol); - TNode IsPromiseCapability(SloppyTNode object); - TNode IsPropertyArray(SloppyTNode object); - TNode IsPropertyCell(SloppyTNode object); - TNode IsPrototypeInitialArrayPrototype(SloppyTNode context, - SloppyTNode map); - TNode IsPrototypeTypedArrayPrototype(SloppyTNode context, - SloppyTNode map); - - TNode IsFastAliasedArgumentsMap(TNode context, - TNode map); - TNode IsSlowAliasedArgumentsMap(TNode context, - TNode map); - TNode IsSloppyArgumentsMap(TNode context, TNode map); - TNode IsStrictArgumentsMap(TNode context, TNode map); - - TNode IsSequentialStringInstanceType( - SloppyTNode instance_type); - TNode IsUncachedExternalStringInstanceType( - SloppyTNode instance_type); - TNode IsSpecialReceiverInstanceType(TNode instance_type); - TNode IsCustomElementsReceiverInstanceType( - TNode instance_type); - TNode IsSpecialReceiverMap(SloppyTNode map); - // Returns true if the map corresponds to non-special fast or dictionary - // object. 
- TNode IsSimpleObjectMap(TNode map); - TNode IsStringInstanceType(SloppyTNode instance_type); - TNode IsString(SloppyTNode object); - TNode IsSymbolInstanceType(SloppyTNode instance_type); - TNode IsSymbol(SloppyTNode object); - TNode IsInternalizedStringInstanceType(TNode instance_type); - TNode IsUniqueName(TNode object); - TNode IsUniqueNameNoIndex(TNode object); - TNode IsUndetectableMap(SloppyTNode map); - TNode IsNotWeakFixedArraySubclass(SloppyTNode object); - TNode IsZeroOrContext(SloppyTNode object); - - inline Node* IsSharedFunctionInfo(Node* object) { - return IsSharedFunctionInfoMap(LoadMap(object)); - } + TNode TimesDoubleSize(SloppyTNode value); + TNode TimesDoubleSize(TNode value) { + return Unsigned(TimesDoubleSize(implicit_cast>(value))); + } + TNode TimesDoubleSize(TNode value) { + return Signed(TimesDoubleSize(implicit_cast>(value))); + } - TNode IsPromiseResolveProtectorCellInvalid(); - TNode IsPromiseThenProtectorCellInvalid(); - TNode IsArraySpeciesProtectorCellInvalid(); - TNode IsTypedArraySpeciesProtectorCellInvalid(); - TNode IsRegExpSpeciesProtectorCellInvalid( - TNode native_context); - TNode IsPromiseSpeciesProtectorCellInvalid(); - - TNode IsMockArrayBufferAllocatorFlag() { - TNode flag_value = UncheckedCast(Load( - MachineType::Uint8(), - ExternalConstant( - ExternalReference::address_of_mock_arraybuffer_allocator_flag()))); - return Word32NotEqual(Word32And(flag_value, Int32Constant(0xFF)), - Int32Constant(0)); - } + // Type conversions. + // Throws a TypeError for {method_name} if {value} is not coercible to Object, + // or returns the {value} converted to a String otherwise. + TNode ToThisString(TNode context, TNode value, + TNode method_name); + TNode ToThisString(TNode context, TNode value, + char const* method_name) { + return ToThisString(context, value, StringConstant(method_name)); + } - // True iff |object| is a Smi or a HeapNumber. 
- TNode IsNumber(SloppyTNode object); - // True iff |object| is a Smi or a HeapNumber or a BigInt. - TNode IsNumeric(SloppyTNode object); + // Throws a TypeError for {method_name} if {value} is neither of the given + // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of + // {primitive_type}, or returns the {value} (or wrapped value) otherwise. + TNode ToThisValue(TNode context, TNode value, + PrimitiveType primitive_type, + char const* method_name); + + // Throws a TypeError for {method_name} if {value} is not of the given + // instance type. Returns {value}'s map. + Node* ThrowIfNotInstanceType(Node* context, Node* value, + InstanceType instance_type, + char const* method_name); + // Throws a TypeError for {method_name} if {value} is not a JSReceiver. + // Returns the {value}'s map. + Node* ThrowIfNotJSReceiver(Node* context, Node* value, + MessageTemplate msg_template, + const char* method_name = nullptr); + void ThrowIfNotCallable(TNode context, TNode value, + const char* method_name); + + void ThrowRangeError(Node* context, MessageTemplate message, + Node* arg0 = nullptr, Node* arg1 = nullptr, + Node* arg2 = nullptr); + void ThrowTypeError(Node* context, MessageTemplate message, + char const* arg0 = nullptr, char const* arg1 = nullptr); + void ThrowTypeError(Node* context, MessageTemplate message, Node* arg0, + Node* arg1 = nullptr, Node* arg2 = nullptr); + + // Type checks. + // Check whether the map is for an object with special properties, such as a + // JSProxy or an object with interceptors. 
+ TNode InstanceTypeEqual(SloppyTNode instance_type, int type); + TNode IsAccessorInfo(SloppyTNode object); + TNode IsAccessorPair(SloppyTNode object); + TNode IsAllocationSite(SloppyTNode object); + TNode IsAnyHeapNumber(SloppyTNode object); + TNode IsNoElementsProtectorCellInvalid(); + TNode IsArrayIteratorProtectorCellInvalid(); + TNode IsBigIntInstanceType(SloppyTNode instance_type); + TNode IsBigInt(SloppyTNode object); + TNode IsBoolean(SloppyTNode object); + TNode IsCallableMap(SloppyTNode map); + TNode IsCallable(SloppyTNode object); + TNode TaggedIsCallable(TNode object); + TNode IsCell(SloppyTNode object); + TNode IsCode(SloppyTNode object); + TNode IsConsStringInstanceType(SloppyTNode instance_type); + TNode IsConstructorMap(SloppyTNode map); + TNode IsConstructor(SloppyTNode object); + TNode IsDebugInfo(TNode object); + TNode IsDeprecatedMap(SloppyTNode map); + TNode IsNameDictionary(SloppyTNode object); + TNode IsGlobalDictionary(SloppyTNode object); + TNode IsExtensibleMap(SloppyTNode map); + TNode IsFrozenOrSealedElementsKindMap(SloppyTNode map); + TNode IsExtensibleNonPrototypeMap(TNode map); + TNode IsExternalStringInstanceType(SloppyTNode instance_type); + TNode IsFeedbackCell(SloppyTNode object); + TNode IsFeedbackVector(SloppyTNode object); + TNode IsContext(SloppyTNode object); + TNode IsFixedArray(SloppyTNode object); + TNode IsFixedArraySubclass(SloppyTNode object); + TNode IsFixedArrayWithKind(SloppyTNode object, + ElementsKind kind); + TNode IsFixedArrayWithKindOrEmpty(SloppyTNode object, + ElementsKind kind); + TNode IsFixedDoubleArray(SloppyTNode object); + TNode IsFunctionWithPrototypeSlotMap(SloppyTNode map); + TNode IsHashTable(SloppyTNode object); + TNode IsEphemeronHashTable(SloppyTNode object); + TNode IsHeapNumber(SloppyTNode object); + TNode IsHeapNumberInstanceType(SloppyTNode instance_type); + TNode IsOddball(SloppyTNode object); + TNode IsOddballInstanceType(SloppyTNode instance_type); + TNode 
IsIndirectStringInstanceType(SloppyTNode instance_type); + TNode IsJSArrayBuffer(SloppyTNode object); + TNode IsJSDataView(TNode object); + TNode IsJSArrayInstanceType(SloppyTNode instance_type); + TNode IsJSArrayMap(SloppyTNode map); + TNode IsJSArray(SloppyTNode object); + TNode IsJSArrayIterator(SloppyTNode object); + TNode IsJSAsyncGeneratorObject(SloppyTNode object); + TNode IsJSFunctionInstanceType(SloppyTNode instance_type); + TNode IsAllocationSiteInstanceType(SloppyTNode instance_type); + TNode IsJSFunctionMap(SloppyTNode map); + TNode IsJSFunction(SloppyTNode object); + TNode IsJSGeneratorObject(SloppyTNode object); + TNode IsJSGlobalProxyInstanceType(SloppyTNode instance_type); + TNode IsJSGlobalProxyMap(SloppyTNode map); + TNode IsJSGlobalProxy(SloppyTNode object); + TNode IsJSObjectInstanceType(SloppyTNode instance_type); + TNode IsJSObjectMap(SloppyTNode map); + TNode IsJSObject(SloppyTNode object); + TNode IsJSPromiseMap(SloppyTNode map); + TNode IsJSPromise(SloppyTNode object); + TNode IsJSProxy(SloppyTNode object); + TNode IsJSStringIterator(SloppyTNode object); + TNode IsJSReceiverInstanceType(SloppyTNode instance_type); + TNode IsJSReceiverMap(SloppyTNode map); + TNode IsJSReceiver(SloppyTNode object); + TNode IsJSRegExp(SloppyTNode object); + TNode IsJSTypedArrayInstanceType(SloppyTNode instance_type); + TNode IsJSTypedArrayMap(SloppyTNode map); + TNode IsJSTypedArray(SloppyTNode object); + TNode IsJSPrimitiveWrapperInstanceType( + SloppyTNode instance_type); + TNode IsJSPrimitiveWrapperMap(SloppyTNode map); + TNode IsJSPrimitiveWrapper(SloppyTNode object); + TNode IsMap(SloppyTNode object); + TNode IsMutableHeapNumber(SloppyTNode object); + TNode IsName(SloppyTNode object); + TNode IsNameInstanceType(SloppyTNode instance_type); + TNode IsNativeContext(SloppyTNode object); + TNode IsNullOrJSReceiver(SloppyTNode object); + TNode IsNullOrUndefined(SloppyTNode object); + TNode IsNumberDictionary(SloppyTNode object); + TNode 
IsOneByteStringInstanceType(SloppyTNode instance_type); + TNode IsPrimitiveInstanceType(SloppyTNode instance_type); + TNode IsPrivateSymbol(SloppyTNode object); + TNode IsPrivateName(SloppyTNode symbol); + TNode IsPromiseCapability(SloppyTNode object); + TNode IsPropertyArray(SloppyTNode object); + TNode IsPropertyCell(SloppyTNode object); + TNode IsPrototypeInitialArrayPrototype(SloppyTNode context, + SloppyTNode map); + TNode IsPrototypeTypedArrayPrototype(SloppyTNode context, + SloppyTNode map); + + TNode IsFastAliasedArgumentsMap(TNode context, + TNode map); + TNode IsSlowAliasedArgumentsMap(TNode context, + TNode map); + TNode IsSloppyArgumentsMap(TNode context, TNode map); + TNode IsStrictArgumentsMap(TNode context, TNode map); + + TNode IsSequentialStringInstanceType( + SloppyTNode instance_type); + TNode IsUncachedExternalStringInstanceType( + SloppyTNode instance_type); + TNode IsSpecialReceiverInstanceType(TNode instance_type); + TNode IsCustomElementsReceiverInstanceType( + TNode instance_type); + TNode IsSpecialReceiverMap(SloppyTNode map); + // Returns true if the map corresponds to non-special fast or dictionary + // object. + TNode IsSimpleObjectMap(TNode map); + TNode IsStringInstanceType(SloppyTNode instance_type); + TNode IsString(SloppyTNode object); + TNode IsSymbolInstanceType(SloppyTNode instance_type); + TNode IsSymbol(SloppyTNode object); + TNode IsInternalizedStringInstanceType(TNode instance_type); + TNode IsUniqueName(TNode object); + TNode IsUniqueNameNoIndex(TNode object); + TNode IsUndetectableMap(SloppyTNode map); + TNode IsNotWeakFixedArraySubclass(SloppyTNode object); + TNode IsZeroOrContext(SloppyTNode object); + + inline Node* IsSharedFunctionInfo(Node* object) { + return IsSharedFunctionInfoMap(LoadMap(object)); + } - // True iff |number| is either a Smi, or a HeapNumber whose value is not - // within Smi range. 
- TNode IsNumberNormalized(SloppyTNode number); - TNode IsNumberPositive(SloppyTNode number); - TNode IsHeapNumberPositive(TNode number); + TNode IsPromiseResolveProtectorCellInvalid(); + TNode IsPromiseThenProtectorCellInvalid(); + TNode IsArraySpeciesProtectorCellInvalid(); + TNode IsTypedArraySpeciesProtectorCellInvalid(); + TNode IsRegExpSpeciesProtectorCellInvalid( + TNode native_context); + TNode IsPromiseSpeciesProtectorCellInvalid(); + + TNode IsMockArrayBufferAllocatorFlag() { + TNode flag_value = UncheckedCast(Load( + MachineType::Uint8(), + ExternalConstant( + ExternalReference::address_of_mock_arraybuffer_allocator_flag()))); + return Word32NotEqual(Word32And(flag_value, Int32Constant(0xFF)), + Int32Constant(0)); + } - // True iff {number} is non-negative and less or equal than 2**53-1. - TNode IsNumberNonNegativeSafeInteger(TNode number); + // True iff |object| is a Smi or a HeapNumber. + TNode IsNumber(SloppyTNode object); + // True iff |object| is a Smi or a HeapNumber or a BigInt. + TNode IsNumeric(SloppyTNode object); - // True iff {number} represents an integer value. - TNode IsInteger(TNode number); - TNode IsInteger(TNode number); + // True iff |number| is either a Smi, or a HeapNumber whose value is not + // within Smi range. + TNode IsNumberNormalized(SloppyTNode number); + TNode IsNumberPositive(SloppyTNode number); + TNode IsHeapNumberPositive(TNode number); - // True iff abs({number}) <= 2**53 -1 - TNode IsSafeInteger(TNode number); - TNode IsSafeInteger(TNode number); + // True iff {number} is non-negative and less or equal than 2**53-1. + TNode IsNumberNonNegativeSafeInteger(TNode number); - // True iff {number} represents a valid uint32t value. - TNode IsHeapNumberUint32(TNode number); + // True iff {number} represents an integer value. + TNode IsInteger(TNode number); + TNode IsInteger(TNode number); - // True iff {number} is a positive number and a valid array index in the range - // [0, 2^32-1). 
- TNode IsNumberArrayIndex(TNode number); + // True iff abs({number}) <= 2**53 -1 + TNode IsSafeInteger(TNode number); + TNode IsSafeInteger(TNode number); - Node* FixedArraySizeDoesntFitInNewSpace( - Node* element_count, int base_size = FixedArray::kHeaderSize, - ParameterMode mode = INTPTR_PARAMETERS); + // True iff {number} represents a valid uint32t value. + TNode IsHeapNumberUint32(TNode number); - // ElementsKind helpers: - TNode ElementsKindEqual(TNode a, TNode b) { - return Word32Equal(a, b); - } - bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; } - TNode IsFastElementsKind(TNode elements_kind); - bool IsFastElementsKind(ElementsKind kind) { - return v8::internal::IsFastElementsKind(kind); - } - TNode IsDictionaryElementsKind(TNode elements_kind) { - return ElementsKindEqual(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)); - } - TNode IsDoubleElementsKind(TNode elements_kind); - bool IsDoubleElementsKind(ElementsKind kind) { - return v8::internal::IsDoubleElementsKind(kind); - } - TNode IsFastSmiOrTaggedElementsKind(TNode elements_kind); - TNode IsFastSmiElementsKind(SloppyTNode elements_kind); - TNode IsHoleyFastElementsKind(TNode elements_kind); - TNode IsHoleyFastElementsKindForRead(TNode elements_kind); - TNode IsElementsKindGreaterThan(TNode target_kind, - ElementsKind reference_kind); - TNode IsElementsKindLessThanOrEqual(TNode target_kind, - ElementsKind reference_kind); - // Check if reference_kind_a <= target_kind <= reference_kind_b - TNode IsElementsKindInRange(TNode target_kind, - ElementsKind lower_reference_kind, - ElementsKind higher_reference_kind); - - // String helpers. - // Load a character from a String (might flatten a ConsString). - TNode StringCharCodeAt(SloppyTNode string, - SloppyTNode index); - // Return the single character string with only {code}. - TNode StringFromSingleCharCode(TNode code); - - // Return a new string object which holds a substring containing the range - // [from,to[ of string. 
- TNode SubString(TNode string, TNode from, - TNode to); - - // Return a new string object produced by concatenating |first| with |second|. - TNode StringAdd(Node* context, TNode first, - TNode second); - - // Check if |string| is an indirect (thin or flat cons) string type that can - // be dereferenced by DerefIndirectString. - void BranchIfCanDerefIndirectString(Node* string, Node* instance_type, - Label* can_deref, Label* cannot_deref); - // Unpack an indirect (thin or flat cons) string type. - void DerefIndirectString(Variable* var_string, Node* instance_type); - // Check if |var_string| has an indirect (thin or flat cons) string type, - // and unpack it if so. - void MaybeDerefIndirectString(Variable* var_string, Node* instance_type, - Label* did_deref, Label* cannot_deref); - // Check if |var_left| or |var_right| has an indirect (thin or flat cons) - // string type, and unpack it/them if so. Fall through if nothing was done. - void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type, - Variable* var_right, Node* right_instance_type, - Label* did_something); - Node* DerefIndirectString(TNode string, TNode instance_type, - Label* cannot_deref); - - TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); - - // Type conversion helpers. - enum class BigIntHandling { kConvertToNumber, kThrow }; - // Convert a String to a Number. - TNode StringToNumber(TNode input); - // Convert a Number to a String. - TNode NumberToString(TNode input); - // Convert a Non-Number object to a Number. - TNode NonNumberToNumber( - SloppyTNode context, SloppyTNode input, - BigIntHandling bigint_handling = BigIntHandling::kThrow); - // Convert a Non-Number object to a Numeric. - TNode NonNumberToNumeric(SloppyTNode context, - SloppyTNode input); - // Convert any object to a Number. - // Conforms to ES#sec-tonumber if {bigint_handling} == kThrow. 
- // With {bigint_handling} == kConvertToNumber, matches behavior of - // tc39.github.io/proposal-bigint/#sec-number-constructor-number-value. - TNode ToNumber( - SloppyTNode context, SloppyTNode input, - BigIntHandling bigint_handling = BigIntHandling::kThrow); - TNode ToNumber_Inline(SloppyTNode context, - SloppyTNode input); - - // Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers). - // https://tc39.github.io/proposal-bigint/#sec-to-bigint - TNode ToBigInt(SloppyTNode context, - SloppyTNode input); - - // Converts |input| to one of 2^32 integer values in the range 0 through - // 2^32-1, inclusive. - // ES#sec-touint32 - TNode ToUint32(SloppyTNode context, - SloppyTNode input); - - // Convert any object to a String. - TNode ToString_Inline(SloppyTNode context, - SloppyTNode input); - - // Convert any object to a Primitive. - Node* JSReceiverToPrimitive(Node* context, Node* input); - - TNode ToObject(SloppyTNode context, - SloppyTNode input); - - // Same as ToObject but avoids the Builtin call if |input| is already a - // JSReceiver. - TNode ToObject_Inline(TNode context, - TNode input); - - enum ToIntegerTruncationMode { - kNoTruncation, - kTruncateMinusZero, - }; - - // ES6 7.1.17 ToIndex, but jumps to range_error if the result is not a Smi. - TNode ToSmiIndex(TNode context, TNode input, - Label* range_error); - - // ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi. - TNode ToSmiLength(TNode context, TNode input, - Label* range_error); - - // ES6 7.1.15 ToLength, but with inlined fast path. - TNode ToLength_Inline(SloppyTNode context, - SloppyTNode input); - - // ES6 7.1.4 ToInteger ( argument ) - TNode ToInteger_Inline(SloppyTNode context, - SloppyTNode input, - ToIntegerTruncationMode mode = kNoTruncation); - TNode ToInteger(SloppyTNode context, - SloppyTNode input, - ToIntegerTruncationMode mode = kNoTruncation); - - // Returns a node that contains a decoded (unsigned!) 
value of a bit - // field |BitField| in |word32|. Returns result as an uint32 node. - template - TNode DecodeWord32(SloppyTNode word32) { - return DecodeWord32(word32, BitField::kShift, BitField::kMask); - } + // True iff {number} is a positive number and a valid array index in the range + // [0, 2^32-1). + TNode IsNumberArrayIndex(TNode number); - // Returns a node that contains a decoded (unsigned!) value of a bit - // field |BitField| in |word|. Returns result as a word-size node. - template - TNode DecodeWord(SloppyTNode word) { - return DecodeWord(word, BitField::kShift, BitField::kMask); - } + Node* FixedArraySizeDoesntFitInNewSpace( + Node* element_count, int base_size = FixedArray::kHeaderSize, + ParameterMode mode = INTPTR_PARAMETERS); - // Returns a node that contains a decoded (unsigned!) value of a bit - // field |BitField| in |word32|. Returns result as a word-size node. - template - TNode DecodeWordFromWord32(SloppyTNode word32) { - return DecodeWord(ChangeUint32ToWord(word32)); - } + // ElementsKind helpers: + TNode ElementsKindEqual(TNode a, TNode b) { + return Word32Equal(a, b); + } + bool ElementsKindEqual(ElementsKind a, ElementsKind b) { + return a == b; + } + TNode IsFastElementsKind(TNode elements_kind); + bool IsFastElementsKind(ElementsKind kind) { + return v8::internal::IsFastElementsKind(kind); + } + TNode IsDictionaryElementsKind(TNode elements_kind) { + return ElementsKindEqual(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)); + } + TNode IsDoubleElementsKind(TNode elements_kind); + bool IsDoubleElementsKind(ElementsKind kind) { + return v8::internal::IsDoubleElementsKind(kind); + } + TNode IsFastSmiOrTaggedElementsKind(TNode elements_kind); + TNode IsFastSmiElementsKind(SloppyTNode elements_kind); + TNode IsHoleyFastElementsKind(TNode elements_kind); + TNode IsHoleyFastElementsKindForRead(TNode elements_kind); + TNode IsElementsKindGreaterThan(TNode target_kind, + ElementsKind reference_kind); + TNode 
IsElementsKindLessThanOrEqual(TNode target_kind, + ElementsKind reference_kind); + // Check if reference_kind_a <= target_kind <= reference_kind_b + TNode IsElementsKindInRange(TNode target_kind, + ElementsKind lower_reference_kind, + ElementsKind higher_reference_kind); + + // String helpers. + // Load a character from a String (might flatten a ConsString). + TNode StringCharCodeAt(SloppyTNode string, + SloppyTNode index); + // Return the single character string with only {code}. + TNode StringFromSingleCharCode(TNode code); + + // Return a new string object which holds a substring containing the range + // [from,to[ of string. + TNode SubString(TNode string, TNode from, + TNode to); + + // Return a new string object produced by concatenating |first| with |second|. + TNode StringAdd(Node* context, TNode first, + TNode second); + + // Check if |string| is an indirect (thin or flat cons) string type that can + // be dereferenced by DerefIndirectString. + void BranchIfCanDerefIndirectString(Node* string, Node* instance_type, + Label* can_deref, Label* cannot_deref); + // Unpack an indirect (thin or flat cons) string type. + void DerefIndirectString(Variable* var_string, Node* instance_type); + // Check if |var_string| has an indirect (thin or flat cons) string type, + // and unpack it if so. + void MaybeDerefIndirectString(Variable* var_string, Node* instance_type, + Label* did_deref, Label* cannot_deref); + // Check if |var_left| or |var_right| has an indirect (thin or flat cons) + // string type, and unpack it/them if so. Fall through if nothing was done. + void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type, + Variable* var_right, Node* right_instance_type, + Label* did_something); + Node* DerefIndirectString(TNode string, TNode instance_type, + Label* cannot_deref); + + TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); + + // Type conversion helpers. 
+ enum class BigIntHandling { kConvertToNumber, kThrow }; + // Convert a String to a Number. + TNode StringToNumber(TNode input); + // Convert a Number to a String. + TNode NumberToString(TNode input); + // Convert a Non-Number object to a Number. + TNode NonNumberToNumber( + SloppyTNode context, SloppyTNode input, + BigIntHandling bigint_handling = BigIntHandling::kThrow); + // Convert a Non-Number object to a Numeric. + TNode NonNumberToNumeric(SloppyTNode context, + SloppyTNode input); + // Convert any object to a Number. + // Conforms to ES#sec-tonumber if {bigint_handling} == kThrow. + // With {bigint_handling} == kConvertToNumber, matches behavior of + // tc39.github.io/proposal-bigint/#sec-number-constructor-number-value. + TNode ToNumber( + SloppyTNode context, SloppyTNode input, + BigIntHandling bigint_handling = BigIntHandling::kThrow); + TNode ToNumber_Inline(SloppyTNode context, + SloppyTNode input); + + // Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers). + // https://tc39.github.io/proposal-bigint/#sec-to-bigint + TNode ToBigInt(SloppyTNode context, + SloppyTNode input); + + // Converts |input| to one of 2^32 integer values in the range 0 through + // 2^32-1, inclusive. + // ES#sec-touint32 + TNode ToUint32(SloppyTNode context, + SloppyTNode input); + + // Convert any object to a String. + TNode ToString_Inline(SloppyTNode context, + SloppyTNode input); + + // Convert any object to a Primitive. + Node* JSReceiverToPrimitive(Node* context, Node* input); + + TNode ToObject(SloppyTNode context, + SloppyTNode input); + + // Same as ToObject but avoids the Builtin call if |input| is already a + // JSReceiver. + TNode ToObject_Inline(TNode context, + TNode input); + + enum ToIntegerTruncationMode { + kNoTruncation, + kTruncateMinusZero, + }; + + // ES6 7.1.17 ToIndex, but jumps to range_error if the result is not a Smi. 
+ TNode ToSmiIndex(TNode context, TNode input, + Label* range_error); + + // ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi. + TNode ToSmiLength(TNode context, TNode input, + Label* range_error); + + // ES6 7.1.15 ToLength, but with inlined fast path. + TNode ToLength_Inline(SloppyTNode context, + SloppyTNode input); + + // ES6 7.1.4 ToInteger ( argument ) + TNode ToInteger_Inline(SloppyTNode context, + SloppyTNode input, + ToIntegerTruncationMode mode = kNoTruncation); + TNode ToInteger(SloppyTNode context, + SloppyTNode input, + ToIntegerTruncationMode mode = kNoTruncation); + + // Returns a node that contains a decoded (unsigned!) value of a bit + // field |BitField| in |word32|. Returns result as an uint32 node. + template + TNode DecodeWord32(SloppyTNode word32) { + return DecodeWord32(word32, BitField::kShift, BitField::kMask); + } - // Returns a node that contains a decoded (unsigned!) value of a bit - // field |BitField| in |word|. Returns result as an uint32 node. - template - TNode DecodeWord32FromWord(SloppyTNode word) { - return UncheckedCast( - TruncateIntPtrToInt32(Signed(DecodeWord(word)))); - } + // Returns a node that contains a decoded (unsigned!) value of a bit + // field |BitField| in |word|. Returns result as a word-size node. + template + TNode DecodeWord(SloppyTNode word) { + return DecodeWord(word, BitField::kShift, BitField::kMask); + } - // Decodes an unsigned (!) value from |word32| to an uint32 node. - TNode DecodeWord32(SloppyTNode word32, uint32_t shift, - uint32_t mask); + // Returns a node that contains a decoded (unsigned!) value of a bit + // field |BitField| in |word32|. Returns result as a word-size node. + template + TNode DecodeWordFromWord32(SloppyTNode word32) { + return DecodeWord(ChangeUint32ToWord(word32)); + } - // Decodes an unsigned (!) value from |word| to a word-size node. 
- TNode DecodeWord(SloppyTNode word, uint32_t shift, - uint32_t mask); + // Returns a node that contains a decoded (unsigned!) value of a bit + // field |BitField| in |word|. Returns result as an uint32 node. + template + TNode DecodeWord32FromWord(SloppyTNode word) { + return UncheckedCast( + TruncateIntPtrToInt32(Signed(DecodeWord(word)))); + } - // Returns a node that contains the updated values of a |BitField|. - template - TNode UpdateWord(TNode word, TNode value) { - return UpdateWord(word, value, BitField::kShift, BitField::kMask); - } + // Decodes an unsigned (!) value from |word32| to an uint32 node. + TNode DecodeWord32(SloppyTNode word32, uint32_t shift, + uint32_t mask); - // Returns a node that contains the updated {value} inside {word} starting - // at {shift} and fitting in {mask}. - TNode UpdateWord(TNode word, TNode value, uint32_t shift, - uint32_t mask); + // Decodes an unsigned (!) value from |word| to a word-size node. + TNode DecodeWord(SloppyTNode word, uint32_t shift, + uint32_t mask); - // Returns true if any of the |T|'s bits in given |word32| are set. - template - TNode IsSetWord32(SloppyTNode word32) { - return IsSetWord32(word32, T::kMask); - } + // Returns a node that contains the updated values of a |BitField|. + template + TNode UpdateWord(TNode word, TNode value) { + return UpdateWord(word, value, BitField::kShift, BitField::kMask); + } - // Returns true if any of the mask's bits in given |word32| are set. - TNode IsSetWord32(SloppyTNode word32, uint32_t mask) { - return Word32NotEqual(Word32And(word32, Int32Constant(mask)), - Int32Constant(0)); - } + // Returns a node that contains the updated {value} inside {word} starting + // at {shift} and fitting in {mask}. + TNode UpdateWord(TNode word, TNode value, uint32_t shift, + uint32_t mask); - // Returns true if none of the mask's bits in given |word32| are set. 
- TNode IsNotSetWord32(SloppyTNode word32, uint32_t mask) { - return Word32Equal(Word32And(word32, Int32Constant(mask)), - Int32Constant(0)); - } + // Returns true if any of the |T|'s bits in given |word32| are set. + template + TNode IsSetWord32(SloppyTNode word32) { + return IsSetWord32(word32, T::kMask); + } - // Returns true if all of the mask's bits in a given |word32| are set. - TNode IsAllSetWord32(SloppyTNode word32, uint32_t mask) { - TNode const_mask = Int32Constant(mask); - return Word32Equal(Word32And(word32, const_mask), const_mask); - } + // Returns true if any of the mask's bits in given |word32| are set. + TNode IsSetWord32(SloppyTNode word32, uint32_t mask) { + return Word32NotEqual(Word32And(word32, Int32Constant(mask)), + Int32Constant(0)); + } - // Returns true if any of the |T|'s bits in given |word| are set. - template - TNode IsSetWord(SloppyTNode word) { - return IsSetWord(word, T::kMask); - } + // Returns true if none of the mask's bits in given |word32| are set. + TNode IsNotSetWord32(SloppyTNode word32, uint32_t mask) { + return Word32Equal(Word32And(word32, Int32Constant(mask)), + Int32Constant(0)); + } - // Returns true if any of the mask's bits in given |word| are set. - TNode IsSetWord(SloppyTNode word, uint32_t mask) { - return WordNotEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0)); - } + // Returns true if all of the mask's bits in a given |word32| are set. + TNode IsAllSetWord32(SloppyTNode word32, uint32_t mask) { + TNode const_mask = Int32Constant(mask); + return Word32Equal(Word32And(word32, const_mask), const_mask); + } - // Returns true if any of the mask's bit are set in the given Smi. - // Smi-encoding of the mask is performed implicitly! 
- TNode IsSetSmi(SloppyTNode smi, int untagged_mask) { - intptr_t mask_word = bit_cast(Smi::FromInt(untagged_mask)); - return WordNotEqual( - WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)), - IntPtrConstant(0)); - } + // Returns true if any of the |T|'s bits in given |word| are set. + template + TNode IsSetWord(SloppyTNode word) { + return IsSetWord(word, T::kMask); + } - // Returns true if all of the |T|'s bits in given |word32| are clear. - template - TNode IsClearWord32(SloppyTNode word32) { - return IsClearWord32(word32, T::kMask); - } + // Returns true if any of the mask's bits in given |word| are set. + TNode IsSetWord(SloppyTNode word, uint32_t mask) { + return WordNotEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0)); + } - // Returns true if all of the mask's bits in given |word32| are clear. - TNode IsClearWord32(SloppyTNode word32, uint32_t mask) { - return Word32Equal(Word32And(word32, Int32Constant(mask)), - Int32Constant(0)); - } + // Returns true if any of the mask's bit are set in the given Smi. + // Smi-encoding of the mask is performed implicitly! + TNode IsSetSmi(SloppyTNode smi, int untagged_mask) { + intptr_t mask_word = bit_cast(Smi::FromInt(untagged_mask)); + return WordNotEqual( + WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)), + IntPtrConstant(0)); + } - // Returns true if all of the |T|'s bits in given |word| are clear. - template - TNode IsClearWord(SloppyTNode word) { - return IsClearWord(word, T::kMask); - } + // Returns true if all of the |T|'s bits in given |word32| are clear. + template + TNode IsClearWord32(SloppyTNode word32) { + return IsClearWord32(word32, T::kMask); + } - // Returns true if all of the mask's bits in given |word| are clear. - TNode IsClearWord(SloppyTNode word, uint32_t mask) { - return WordEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0)); - } + // Returns true if all of the mask's bits in given |word32| are clear. 
+ TNode IsClearWord32(SloppyTNode word32, uint32_t mask) { + return Word32Equal(Word32And(word32, Int32Constant(mask)), + Int32Constant(0)); + } - void SetCounter(StatsCounter* counter, int value); - void IncrementCounter(StatsCounter* counter, int delta); - void DecrementCounter(StatsCounter* counter, int delta); + // Returns true if all of the |T|'s bits in given |word| are clear. + template + TNode IsClearWord(SloppyTNode word) { + return IsClearWord(word, T::kMask); + } - void Increment(Variable* variable, int value = 1, - ParameterMode mode = INTPTR_PARAMETERS); - void Decrement(Variable* variable, int value = 1, - ParameterMode mode = INTPTR_PARAMETERS) { - Increment(variable, -value, mode); - } + // Returns true if all of the mask's bits in given |word| are clear. + TNode IsClearWord(SloppyTNode word, uint32_t mask) { + return WordEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0)); + } - // Generates "if (false) goto label" code. Useful for marking a label as - // "live" to avoid assertion failures during graph building. In the resulting - // code this check will be eliminated. - void Use(Label* label); - - // Various building blocks for stubs doing property lookups. - - // |if_notinternalized| is optional; |if_bailout| will be used by default. - // Note: If |key| does not yet have a hash, |if_notinternalized| will be taken - // even if |key| is an array index. |if_keyisunique| will never - // be taken for array indices. - void TryToName(Node* key, Label* if_keyisindex, Variable* var_index, - Label* if_keyisunique, Variable* var_unique, Label* if_bailout, - Label* if_notinternalized = nullptr); - - // Performs a hash computation and string table lookup for the given string, - // and jumps to: - // - |if_index| if the string is an array index like "123"; |var_index| - // will contain the intptr representation of that index. 
- // - |if_internalized| if the string exists in the string table; the - // internalized version will be in |var_internalized|. - // - |if_not_internalized| if the string is not in the string table (but - // does not add it). - // - |if_bailout| for unsupported cases (e.g. uncachable array index). - void TryInternalizeString(Node* string, Label* if_index, Variable* var_index, - Label* if_internalized, Variable* var_internalized, - Label* if_not_internalized, Label* if_bailout); - - // Calculates array index for given dictionary entry and entry field. - // See Dictionary::EntryToIndex(). - template - V8_EXPORT_PRIVATE TNode EntryToIndex(TNode entry, - int field_index); - template - V8_EXPORT_PRIVATE TNode EntryToIndex(TNode entry) { - return EntryToIndex(entry, Dictionary::kEntryKeyIndex); - } + void SetCounter(StatsCounter* counter, int value); + void IncrementCounter(StatsCounter* counter, int delta); + void DecrementCounter(StatsCounter* counter, int delta); - // Loads the details for the entry with the given key_index. - // Returns an untagged int32. - template - TNode LoadDetailsByKeyIndex(Node* container, Node* key_index) { - static_assert(!std::is_same::value, - "Use the non-templatized version for DescriptorArray"); - const int kKeyToDetailsOffset = - (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) * - kTaggedSize; - return Unsigned(LoadAndUntagToWord32FixedArrayElement( - CAST(container), key_index, kKeyToDetailsOffset)); - } + void Increment(Variable* variable, int value = 1, + ParameterMode mode = INTPTR_PARAMETERS); + void Decrement(Variable* variable, int value = 1, + ParameterMode mode = INTPTR_PARAMETERS) { + Increment(variable, -value, mode); + } - // Loads the value for the entry with the given key_index. - // Returns a tagged value. 
- template - TNode LoadValueByKeyIndex(Node* container, Node* key_index) { - static_assert(!std::is_same::value, - "Use the non-templatized version for DescriptorArray"); - const int kKeyToValueOffset = - (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) * - kTaggedSize; - return LoadFixedArrayElement(CAST(container), key_index, kKeyToValueOffset); - } + // Generates "if (false) goto label" code. Useful for marking a label as + // "live" to avoid assertion failures during graph building. In the resulting + // code this check will be eliminated. + void Use(Label* label); + + // Various building blocks for stubs doing property lookups. + + // |if_notinternalized| is optional; |if_bailout| will be used by default. + // Note: If |key| does not yet have a hash, |if_notinternalized| will be taken + // even if |key| is an array index. |if_keyisunique| will never + // be taken for array indices. + void TryToName(Node* key, Label* if_keyisindex, Variable* var_index, + Label* if_keyisunique, Variable* var_unique, Label* if_bailout, + Label* if_notinternalized = nullptr); + + // Performs a hash computation and string table lookup for the given string, + // and jumps to: + // - |if_index| if the string is an array index like "123"; |var_index| + // will contain the intptr representation of that index. + // - |if_internalized| if the string exists in the string table; the + // internalized version will be in |var_internalized|. + // - |if_not_internalized| if the string is not in the string table (but + // does not add it). + // - |if_bailout| for unsupported cases (e.g. uncachable array index). + void TryInternalizeString(Node* string, Label* if_index, Variable* var_index, + Label* if_internalized, Variable* var_internalized, + Label* if_not_internalized, Label* if_bailout); + + // Calculates array index for given dictionary entry and entry field. + // See Dictionary::EntryToIndex(). 
+ template + V8_EXPORT_PRIVATE TNode EntryToIndex(TNode entry, + int field_index); + template + V8_EXPORT_PRIVATE TNode EntryToIndex(TNode entry) { + return EntryToIndex(entry, Dictionary::kEntryKeyIndex); + } - // Stores the details for the entry with the given key_index. - // |details| must be a Smi. - template - void StoreDetailsByKeyIndex(TNode container, - TNode key_index, TNode details) { - const int kKeyToDetailsOffset = - (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) * - kTaggedSize; - StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER, - kKeyToDetailsOffset); - } + // Loads the details for the entry with the given key_index. + // Returns an untagged int32. + template + TNode LoadDetailsByKeyIndex(Node* container, Node* key_index) { + static_assert(!std::is_same::value, + "Use the non-templatized version for DescriptorArray"); + const int kKeyToDetailsOffset = + (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) * + kTaggedSize; + return Unsigned(LoadAndUntagToWord32FixedArrayElement( + CAST(container), key_index, kKeyToDetailsOffset)); + } - // Stores the value for the entry with the given key_index. - template - void StoreValueByKeyIndex( - TNode container, TNode key_index, - TNode value, - WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) { - const int kKeyToValueOffset = - (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) * - kTaggedSize; - StoreFixedArrayElement(container, key_index, value, write_barrier, - kKeyToValueOffset); - } + // Loads the value for the entry with the given key_index. + // Returns a tagged value. 
+ template + TNode LoadValueByKeyIndex(Node* container, Node* key_index) { + static_assert(!std::is_same::value, + "Use the non-templatized version for DescriptorArray"); + const int kKeyToValueOffset = + (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) * + kTaggedSize; + return LoadFixedArrayElement(CAST(container), key_index, kKeyToValueOffset); + } - // Calculate a valid size for the a hash table. - TNode HashTableComputeCapacity(TNode at_least_space_for); + // Stores the details for the entry with the given key_index. + // |details| must be a Smi. + template + void StoreDetailsByKeyIndex(TNode container, + TNode key_index, TNode details) { + const int kKeyToDetailsOffset = + (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) * + kTaggedSize; + StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER, + kKeyToDetailsOffset); + } - template - TNode GetNumberOfElements(TNode dictionary) { - return CAST( - LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex)); - } + // Stores the value for the entry with the given key_index. + template + void StoreValueByKeyIndex( + TNode container, TNode key_index, + TNode value, + WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) { + const int kKeyToValueOffset = + (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) * + kTaggedSize; + StoreFixedArrayElement(container, key_index, value, write_barrier, + kKeyToValueOffset); + } - TNode GetNumberDictionaryNumberOfElements( - TNode dictionary) { - return GetNumberOfElements(dictionary); - } + // Calculate a valid size for the a hash table. 
+ TNode HashTableComputeCapacity(TNode at_least_space_for); - template - void SetNumberOfElements(TNode dictionary, - TNode num_elements_smi) { - StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex, - num_elements_smi, SKIP_WRITE_BARRIER); - } + template + TNode GetNumberOfElements(TNode dictionary) { + return CAST( + LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex)); + } - template - TNode GetNumberOfDeletedElements(TNode dictionary) { - return CAST(LoadFixedArrayElement( - dictionary, Dictionary::kNumberOfDeletedElementsIndex)); - } + TNode GetNumberDictionaryNumberOfElements( + TNode dictionary) { + return GetNumberOfElements(dictionary); + } - template - void SetNumberOfDeletedElements(TNode dictionary, - TNode num_deleted_smi) { - StoreFixedArrayElement(dictionary, - Dictionary::kNumberOfDeletedElementsIndex, - num_deleted_smi, SKIP_WRITE_BARRIER); - } + template + void SetNumberOfElements(TNode dictionary, + TNode num_elements_smi) { + StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex, + num_elements_smi, SKIP_WRITE_BARRIER); + } - template - TNode GetCapacity(TNode dictionary) { - return CAST( - UnsafeLoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex)); - } + template + TNode GetNumberOfDeletedElements(TNode dictionary) { + return CAST(LoadFixedArrayElement( + dictionary, Dictionary::kNumberOfDeletedElementsIndex)); + } - template - TNode GetNextEnumerationIndex(TNode dictionary) { - return CAST(LoadFixedArrayElement(dictionary, - Dictionary::kNextEnumerationIndexIndex)); - } + template + void SetNumberOfDeletedElements(TNode dictionary, + TNode num_deleted_smi) { + StoreFixedArrayElement(dictionary, + Dictionary::kNumberOfDeletedElementsIndex, + num_deleted_smi, SKIP_WRITE_BARRIER); + } - template - void SetNextEnumerationIndex(TNode dictionary, - TNode next_enum_index_smi) { - StoreFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex, - next_enum_index_smi, 
SKIP_WRITE_BARRIER); - } + template + TNode GetCapacity(TNode dictionary) { + return CAST( + UnsafeLoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex)); + } - // Looks up an entry in a NameDictionaryBase successor. If the entry is found - // control goes to {if_found} and {var_name_index} contains an index of the - // key field of the entry found. If the key is not found control goes to - // {if_not_found}. - enum LookupMode { kFindExisting, kFindInsertionIndex }; - - template - TNode LoadName(TNode key); - - template - void NameDictionaryLookup(TNode dictionary, - TNode unique_name, Label* if_found, - TVariable* var_name_index, - Label* if_not_found, - LookupMode mode = kFindExisting); - - Node* ComputeUnseededHash(Node* key); - Node* ComputeSeededHash(Node* key); - - void NumberDictionaryLookup(TNode dictionary, - TNode intptr_index, Label* if_found, - TVariable* var_entry, - Label* if_not_found); - - TNode BasicLoadNumberDictionaryElement( - TNode dictionary, TNode intptr_index, - Label* not_data, Label* if_hole); - void BasicStoreNumberDictionaryElement(TNode dictionary, - TNode intptr_index, - TNode value, Label* not_data, - Label* if_hole, Label* read_only); - - template - void FindInsertionEntry(TNode dictionary, TNode key, - TVariable* var_key_index); - - template - void InsertEntry(TNode dictionary, TNode key, - TNode value, TNode index, - TNode enum_index); - - template - void Add(TNode dictionary, TNode key, TNode value, - Label* bailout); - - // Tries to check if {object} has own {unique_name} property. - void TryHasOwnProperty(Node* object, Node* map, Node* instance_type, - Node* unique_name, Label* if_found, - Label* if_not_found, Label* if_bailout); - - // Operating mode for TryGetOwnProperty and CallGetterIfAccessor - // kReturnAccessorPair is used when we're only getting the property descriptor - enum GetOwnPropertyMode { kCallJSGetter, kReturnAccessorPair }; - // Tries to get {object}'s own {unique_name} property value. 
If the property - // is an accessor then it also calls a getter. If the property is a double - // field it re-wraps value in an immutable heap number. {unique_name} must be - // a unique name (Symbol or InternalizedString) that is not an array index. - void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map, - Node* instance_type, Node* unique_name, - Label* if_found, Variable* var_value, - Label* if_not_found, Label* if_bailout); - void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map, - Node* instance_type, Node* unique_name, - Label* if_found, Variable* var_value, - Variable* var_details, Variable* var_raw_value, - Label* if_not_found, Label* if_bailout, - GetOwnPropertyMode mode); - - TNode GetProperty(SloppyTNode context, - SloppyTNode receiver, Handle name) { - return GetProperty(context, receiver, HeapConstant(name)); - } + template + TNode GetNextEnumerationIndex(TNode dictionary) { + return CAST(LoadFixedArrayElement(dictionary, + Dictionary::kNextEnumerationIndexIndex)); + } - TNode GetProperty(SloppyTNode context, - SloppyTNode receiver, - SloppyTNode name) { - return CallBuiltin(Builtins::kGetProperty, context, receiver, name); - } + template + void SetNextEnumerationIndex(TNode dictionary, + TNode next_enum_index_smi) { + StoreFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex, + next_enum_index_smi, SKIP_WRITE_BARRIER); + } - TNode SetPropertyStrict(TNode context, - TNode receiver, TNode key, - TNode value) { - return CallBuiltin(Builtins::kSetProperty, context, receiver, key, value); - } + // Looks up an entry in a NameDictionaryBase successor. If the entry is found + // control goes to {if_found} and {var_name_index} contains an index of the + // key field of the entry found. If the key is not found control goes to + // {if_not_found}. 
+ enum LookupMode { kFindExisting, kFindInsertionIndex }; + + template + TNode LoadName(TNode key); + + template + void NameDictionaryLookup(TNode dictionary, + TNode unique_name, Label* if_found, + TVariable* var_name_index, + Label* if_not_found, + LookupMode mode = kFindExisting); + + Node* ComputeUnseededHash(Node* key); + Node* ComputeSeededHash(Node* key); + + void NumberDictionaryLookup(TNode dictionary, + TNode intptr_index, Label* if_found, + TVariable* var_entry, + Label* if_not_found); + + TNode BasicLoadNumberDictionaryElement( + TNode dictionary, TNode intptr_index, + Label* not_data, Label* if_hole); + void BasicStoreNumberDictionaryElement(TNode dictionary, + TNode intptr_index, + TNode value, Label* not_data, + Label* if_hole, Label* read_only); + + template + void FindInsertionEntry(TNode dictionary, TNode key, + TVariable* var_key_index); + + template + void InsertEntry(TNode dictionary, TNode key, + TNode value, TNode index, + TNode enum_index); + + template + void Add(TNode dictionary, TNode key, TNode value, + Label* bailout); + + // Tries to check if {object} has own {unique_name} property. + void TryHasOwnProperty(Node* object, Node* map, Node* instance_type, + Node* unique_name, Label* if_found, + Label* if_not_found, Label* if_bailout); + + // Operating mode for TryGetOwnProperty and CallGetterIfAccessor + // kReturnAccessorPair is used when we're only getting the property descriptor + enum GetOwnPropertyMode { kCallJSGetter, kReturnAccessorPair }; + // Tries to get {object}'s own {unique_name} property value. If the property + // is an accessor then it also calls a getter. If the property is a double + // field it re-wraps value in an immutable heap number. {unique_name} must be + // a unique name (Symbol or InternalizedString) that is not an array index. 
+ void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map, + Node* instance_type, Node* unique_name, + Label* if_found, Variable* var_value, + Label* if_not_found, Label* if_bailout); + void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map, + Node* instance_type, Node* unique_name, + Label* if_found, Variable* var_value, + Variable* var_details, Variable* var_raw_value, + Label* if_not_found, Label* if_bailout, + GetOwnPropertyMode mode); + + TNode GetProperty(SloppyTNode context, + SloppyTNode receiver, Handle name) { + return GetProperty(context, receiver, HeapConstant(name)); + } - TNode SetPropertyInLiteral(TNode context, - TNode receiver, - TNode key, TNode value) { - return CallBuiltin(Builtins::kSetPropertyInLiteral, context, receiver, key, - value); - } + TNode GetProperty(SloppyTNode context, + SloppyTNode receiver, + SloppyTNode name) { + return CallBuiltin(Builtins::kGetProperty, context, receiver, name); + } - Node* GetMethod(Node* context, Node* object, Handle name, - Label* if_null_or_undefined); + TNode SetPropertyStrict(TNode context, + TNode receiver, TNode key, + TNode value) { + return CallBuiltin(Builtins::kSetProperty, context, receiver, key, value); + } - TNode GetIteratorMethod(TNode context, - TNode heap_obj, - Label* if_iteratorundefined); + TNode SetPropertyInLiteral(TNode context, + TNode receiver, + TNode key, TNode value) { + return CallBuiltin(Builtins::kSetPropertyInLiteral, context, receiver, key, + value); + } - template - TNode CallBuiltin(Builtins::Name id, SloppyTNode context, - TArgs... args) { - return CallStub(Builtins::CallableFor(isolate(), id), context, - args...); - } + Node* GetMethod(Node* context, Node* object, Handle name, + Label* if_null_or_undefined); - template - void TailCallBuiltin(Builtins::Name id, SloppyTNode context, - TArgs... 
args) { - return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...); - } + TNode GetIteratorMethod(TNode context, + TNode heap_obj, + Label* if_iteratorundefined); + + template + TNode CallBuiltin(Builtins::Name id, SloppyTNode context, + TArgs... args) { + return CallStub(Builtins::CallableFor(isolate(), id), context, + args...); + } - void LoadPropertyFromFastObject(Node* object, Node* map, - TNode descriptors, - Node* name_index, Variable* var_details, - Variable* var_value); + template + void TailCallBuiltin(Builtins::Name id, SloppyTNode context, + TArgs... args) { + return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...); + } - void LoadPropertyFromFastObject(Node* object, Node* map, - TNode descriptors, - Node* name_index, Node* details, - Variable* var_value); + void LoadPropertyFromFastObject(Node* object, Node* map, + TNode descriptors, + Node* name_index, Variable* var_details, + Variable* var_value); - void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry, - Variable* var_details, - Variable* var_value); + void LoadPropertyFromFastObject(Node* object, Node* map, + TNode descriptors, + Node* name_index, Node* details, + Variable* var_value); - void LoadPropertyFromGlobalDictionary(Node* dictionary, Node* entry, + void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry, Variable* var_details, - Variable* var_value, Label* if_deleted); - - // Generic property lookup generator. If the {object} is fast and - // {unique_name} property is found then the control goes to {if_found_fast} - // label and {var_meta_storage} and {var_name_index} will contain - // DescriptorArray and an index of the descriptor's name respectively. - // If the {object} is slow or global then the control goes to {if_found_dict} - // or {if_found_global} and the {var_meta_storage} and {var_name_index} will - // contain a dictionary and an index of the key field of the found entry. 
- // If property is not found or given lookup is not supported then - // the control goes to {if_not_found} or {if_bailout} respectively. - // - // Note: this code does not check if the global dictionary points to deleted - // entry! This has to be done by the caller. - void TryLookupProperty(SloppyTNode object, SloppyTNode map, - SloppyTNode instance_type, - SloppyTNode unique_name, Label* if_found_fast, - Label* if_found_dict, Label* if_found_global, - TVariable* var_meta_storage, - TVariable* var_name_index, - Label* if_not_found, Label* if_bailout); - - // This is a building block for TryLookupProperty() above. Supports only - // non-special fast and dictionary objects. - void TryLookupPropertyInSimpleObject(TNode object, TNode map, - TNode unique_name, - Label* if_found_fast, - Label* if_found_dict, - TVariable* var_meta_storage, - TVariable* var_name_index, - Label* if_not_found); - - // This method jumps to if_found if the element is known to exist. To - // if_absent if it's known to not exist. To if_not_found if the prototype - // chain needs to be checked. And if_bailout if the lookup is unsupported. - void TryLookupElement(Node* object, Node* map, - SloppyTNode instance_type, - SloppyTNode intptr_index, Label* if_found, - Label* if_absent, Label* if_not_found, - Label* if_bailout); - - // This is a type of a lookup in holder generator function. In case of a - // property lookup the {key} is guaranteed to be an unique name and in case of - // element lookup the key is an Int32 index. - using LookupInHolder = std::function; - - // For integer indexed exotic cases, check if the given string cannot be a - // special index. If we are not sure that the given string is not a special - // index with a simple check, return False. Note that "False" return value - // does not mean that the name_string is a special index in the current - // implementation. 
- void BranchIfMaybeSpecialIndex(TNode name_string, - Label* if_maybe_special_index, - Label* if_not_special_index); - - // Generic property prototype chain lookup generator. - // For properties it generates lookup using given {lookup_property_in_holder} - // and for elements it uses {lookup_element_in_holder}. - // Upon reaching the end of prototype chain the control goes to {if_end}. - // If it can't handle the case {receiver}/{key} case then the control goes - // to {if_bailout}. - // If {if_proxy} is nullptr, proxies go to if_bailout. - void TryPrototypeChainLookup(Node* receiver, Node* object, Node* key, - const LookupInHolder& lookup_property_in_holder, - const LookupInHolder& lookup_element_in_holder, - Label* if_end, Label* if_bailout, - Label* if_proxy); - - // Instanceof helpers. - // Returns true if {object} has {prototype} somewhere in it's prototype - // chain, otherwise false is returned. Might cause arbitrary side effects - // due to [[GetPrototypeOf]] invocations. - Node* HasInPrototypeChain(Node* context, Node* object, Node* prototype); - // ES6 section 7.3.19 OrdinaryHasInstance (C, O) - Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object); - - // Load type feedback vector from the stub caller's frame. - TNode LoadFeedbackVectorForStub(); - - // Load the value from closure's feedback cell. - TNode LoadFeedbackCellValue(SloppyTNode closure); - - // Load the object from feedback vector cell for the given closure. - // The returned object could be undefined if the closure does not have - // a feedback vector associated with it. - TNode LoadFeedbackVector(SloppyTNode closure); - - // Load the ClosureFeedbackCellArray that contains the feedback cells - // used when creating closures from this function. This array could be - // directly hanging off the FeedbackCell when there is no feedback vector - // or available from the feedback vector's header. 
- TNode LoadClosureFeedbackArray( - SloppyTNode closure); - - // Update the type feedback vector. - void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id); - - // Report that there was a feedback update, performing any tasks that should - // be done after a feedback update. - void ReportFeedbackUpdate(SloppyTNode feedback_vector, - SloppyTNode slot_id, const char* reason); - - // Combine the new feedback with the existing_feedback. Do nothing if - // existing_feedback is nullptr. - void CombineFeedback(Variable* existing_feedback, int feedback); - void CombineFeedback(Variable* existing_feedback, Node* feedback); - - // Overwrite the existing feedback with new_feedback. Do nothing if - // existing_feedback is nullptr. - void OverwriteFeedback(Variable* existing_feedback, int new_feedback); - - // Check if a property name might require protector invalidation when it is - // used for a property store or deletion. - void CheckForAssociatedProtector(Node* name, Label* if_protector); - - TNode LoadReceiverMap(SloppyTNode receiver); - - enum class ArgumentsAccessMode { kLoad, kStore, kHas }; - // Emits keyed sloppy arguments has. Returns whether the key is in the - // arguments. - Node* HasKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) { - return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout, - ArgumentsAccessMode::kHas); - } + Variable* var_value); + + void LoadPropertyFromGlobalDictionary(Node* dictionary, Node* entry, + Variable* var_details, + Variable* var_value, Label* if_deleted); + + // Generic property lookup generator. If the {object} is fast and + // {unique_name} property is found then the control goes to {if_found_fast} + // label and {var_meta_storage} and {var_name_index} will contain + // DescriptorArray and an index of the descriptor's name respectively. 
+ // If the {object} is slow or global then the control goes to {if_found_dict} + // or {if_found_global} and the {var_meta_storage} and {var_name_index} will + // contain a dictionary and an index of the key field of the found entry. + // If property is not found or given lookup is not supported then + // the control goes to {if_not_found} or {if_bailout} respectively. + // + // Note: this code does not check if the global dictionary points to deleted + // entry! This has to be done by the caller. + void TryLookupProperty(SloppyTNode object, SloppyTNode map, + SloppyTNode instance_type, + SloppyTNode unique_name, Label* if_found_fast, + Label* if_found_dict, Label* if_found_global, + TVariable* var_meta_storage, + TVariable* var_name_index, + Label* if_not_found, Label* if_bailout); + + // This is a building block for TryLookupProperty() above. Supports only + // non-special fast and dictionary objects. + void TryLookupPropertyInSimpleObject(TNode object, TNode map, + TNode unique_name, + Label* if_found_fast, + Label* if_found_dict, + TVariable* var_meta_storage, + TVariable* var_name_index, + Label* if_not_found); + + // This method jumps to if_found if the element is known to exist. To + // if_absent if it's known to not exist. To if_not_found if the prototype + // chain needs to be checked. And if_bailout if the lookup is unsupported. + void TryLookupElement(Node* object, Node* map, + SloppyTNode instance_type, + SloppyTNode intptr_index, Label* if_found, + Label* if_absent, Label* if_not_found, + Label* if_bailout); + + // This is a type of a lookup in holder generator function. In case of a + // property lookup the {key} is guaranteed to be an unique name and in case of + // element lookup the key is an Int32 index. + using LookupInHolder = std::function; + + // For integer indexed exotic cases, check if the given string cannot be a + // special index. If we are not sure that the given string is not a special + // index with a simple check, return False. 
Note that "False" return value + // does not mean that the name_string is a special index in the current + // implementation. + void BranchIfMaybeSpecialIndex(TNode name_string, + Label* if_maybe_special_index, + Label* if_not_special_index); + + // Generic property prototype chain lookup generator. + // For properties it generates lookup using given {lookup_property_in_holder} + // and for elements it uses {lookup_element_in_holder}. + // Upon reaching the end of prototype chain the control goes to {if_end}. + // If it can't handle the case {receiver}/{key} case then the control goes + // to {if_bailout}. + // If {if_proxy} is nullptr, proxies go to if_bailout. + void TryPrototypeChainLookup(Node* receiver, Node* object, Node* key, + const LookupInHolder& lookup_property_in_holder, + const LookupInHolder& lookup_element_in_holder, + Label* if_end, Label* if_bailout, + Label* if_proxy); + + // Instanceof helpers. + // Returns true if {object} has {prototype} somewhere in it's prototype + // chain, otherwise false is returned. Might cause arbitrary side effects + // due to [[GetPrototypeOf]] invocations. + Node* HasInPrototypeChain(Node* context, Node* object, Node* prototype); + // ES6 section 7.3.19 OrdinaryHasInstance (C, O) + Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object); + + // Load type feedback vector from the stub caller's frame. + TNode LoadFeedbackVectorForStub(); + + // Load the value from closure's feedback cell. + TNode LoadFeedbackCellValue(SloppyTNode closure); + + // Load the object from feedback vector cell for the given closure. + // The returned object could be undefined if the closure does not have + // a feedback vector associated with it. + TNode LoadFeedbackVector(SloppyTNode closure); + + // Load the ClosureFeedbackCellArray that contains the feedback cells + // used when creating closures from this function. 
This array could be + // directly hanging off the FeedbackCell when there is no feedback vector + // or available from the feedback vector's header. + TNode LoadClosureFeedbackArray( + SloppyTNode closure); + + // Update the type feedback vector. + void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id); + + // Report that there was a feedback update, performing any tasks that should + // be done after a feedback update. + void ReportFeedbackUpdate(SloppyTNode feedback_vector, + SloppyTNode slot_id, const char* reason); + + // Combine the new feedback with the existing_feedback. Do nothing if + // existing_feedback is nullptr. + void CombineFeedback(Variable* existing_feedback, int feedback); + void CombineFeedback(Variable* existing_feedback, Node* feedback); + + // Overwrite the existing feedback with new_feedback. Do nothing if + // existing_feedback is nullptr. + void OverwriteFeedback(Variable* existing_feedback, int new_feedback); + + // Check if a property name might require protector invalidation when it is + // used for a property store or deletion. + void CheckForAssociatedProtector(Node* name, Label* if_protector); + + TNode LoadReceiverMap(SloppyTNode receiver); + + enum class ArgumentsAccessMode { kLoad, kStore, kHas }; + // Emits keyed sloppy arguments has. Returns whether the key is in the + // arguments. + Node* HasKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) { + return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout, + ArgumentsAccessMode::kHas); + } - // Emits keyed sloppy arguments load. Returns either the loaded value. - Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) { - return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout, - ArgumentsAccessMode::kLoad); - } + // Emits keyed sloppy arguments load. Returns either the loaded value. 
+ Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) { + return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout, + ArgumentsAccessMode::kLoad); + } - // Emits keyed sloppy arguments store. - void StoreKeyedSloppyArguments(Node* receiver, Node* key, Node* value, - Label* bailout) { - DCHECK_NOT_NULL(value); - EmitKeyedSloppyArguments(receiver, key, value, bailout, - ArgumentsAccessMode::kStore); - } + // Emits keyed sloppy arguments store. + void StoreKeyedSloppyArguments(Node* receiver, Node* key, Node* value, + Label* bailout) { + DCHECK_NOT_NULL(value); + EmitKeyedSloppyArguments(receiver, key, value, bailout, + ArgumentsAccessMode::kStore); + } - // Loads script context from the script context table. - TNode LoadScriptContext(TNode context, - TNode context_index); - - Node* Int32ToUint8Clamped(Node* int32_value); - Node* Float64ToUint8Clamped(Node* float64_value); - - Node* PrepareValueForWriteToTypedArray(TNode input, - ElementsKind elements_kind, - TNode context); - - // Store value to an elements array with given elements kind. - // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS - // we pass {value} as BigInt object instead of int64_t. We should - // teach TurboFan to handle int64_t on 32-bit platforms eventually. - void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value, - ParameterMode mode); - - // Implements the BigInt part of - // https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes, - // including truncation to 64 bits (i.e. modulo 2^64). - // {var_high} is only used on 32-bit platforms. 
- void BigIntToRawBytes(TNode bigint, TVariable* var_low, - TVariable* var_high); - - void EmitElementStore(Node* object, Node* key, Node* value, - ElementsKind elements_kind, - KeyedAccessStoreMode store_mode, Label* bailout, - Node* context, - Variable* maybe_converted_value = nullptr); - - Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind, - Node* length, Node* key, ParameterMode mode, - Label* bailout); - - Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind, - Node* length, ParameterMode mode, Label* bailout); - - void TransitionElementsKind(Node* object, Node* map, ElementsKind from_kind, - ElementsKind to_kind, Label* bailout); - - void TrapAllocationMemento(Node* object, Label* memento_found); - - TNode PageFromAddress(TNode address); - - // Store a weak in-place reference into the FeedbackVector. - TNode StoreWeakReferenceInFeedbackVector( - SloppyTNode feedback_vector, Node* slot, - SloppyTNode value, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - // Create a new AllocationSite and install it into a feedback vector. - TNode CreateAllocationSiteInFeedbackVector( - SloppyTNode feedback_vector, TNode slot); - - // TODO(ishell, cbruni): Change to HasBoilerplate. 
- TNode NotHasBoilerplate(TNode maybe_literal_site); - TNode LoadTransitionInfo(TNode allocation_site); - TNode LoadBoilerplate(TNode allocation_site); - TNode LoadElementsKind(TNode allocation_site); - - enum class IndexAdvanceMode { kPre, kPost }; - - using FastLoopBody = std::function; - - Node* BuildFastLoop(const VariableList& var_list, Node* start_index, - Node* end_index, const FastLoopBody& body, int increment, - ParameterMode parameter_mode, - IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); - - Node* BuildFastLoop(Node* start_index, Node* end_index, - const FastLoopBody& body, int increment, - ParameterMode parameter_mode, - IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { - return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body, - increment, parameter_mode, advance_mode); - } + // Loads script context from the script context table. + TNode LoadScriptContext(TNode context, + TNode context_index); + + Node* Int32ToUint8Clamped(Node* int32_value); + Node* Float64ToUint8Clamped(Node* float64_value); + + Node* PrepareValueForWriteToTypedArray(TNode input, + ElementsKind elements_kind, + TNode context); + + // Store value to an elements array with given elements kind. + // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS + // we pass {value} as BigInt object instead of int64_t. We should + // teach TurboFan to handle int64_t on 32-bit platforms eventually. + void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value, + ParameterMode mode); + + // Implements the BigInt part of + // https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes, + // including truncation to 64 bits (i.e. modulo 2^64). + // {var_high} is only used on 32-bit platforms. 
+ void BigIntToRawBytes(TNode bigint, TVariable* var_low, + TVariable* var_high); + + void EmitElementStore(Node* object, Node* key, Node* value, + ElementsKind elements_kind, + KeyedAccessStoreMode store_mode, Label* bailout, + Node* context, + Variable* maybe_converted_value = nullptr); + + Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind, + Node* length, Node* key, ParameterMode mode, + Label* bailout); + + Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind, + Node* length, ParameterMode mode, Label* bailout); + + void TransitionElementsKind(Node* object, Node* map, ElementsKind from_kind, + ElementsKind to_kind, Label* bailout); + + void TrapAllocationMemento(Node* object, Label* memento_found); + + TNode PageFromAddress(TNode address); + + // Store a weak in-place reference into the FeedbackVector. + TNode StoreWeakReferenceInFeedbackVector( + SloppyTNode feedback_vector, Node* slot, + SloppyTNode value, int additional_offset = 0, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + + // Create a new AllocationSite and install it into a feedback vector. + TNode CreateAllocationSiteInFeedbackVector( + SloppyTNode feedback_vector, TNode slot); + + // TODO(ishell, cbruni): Change to HasBoilerplate. 
+ TNode NotHasBoilerplate(TNode maybe_literal_site); + TNode LoadTransitionInfo(TNode allocation_site); + TNode LoadBoilerplate(TNode allocation_site); + TNode LoadElementsKind(TNode allocation_site); + + enum class IndexAdvanceMode { kPre, kPost }; + + using FastLoopBody = std::function; + + Node* BuildFastLoop(const VariableList& var_list, Node* start_index, + Node* end_index, const FastLoopBody& body, int increment, + ParameterMode parameter_mode, + IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); + + Node* BuildFastLoop(Node* start_index, Node* end_index, + const FastLoopBody& body, int increment, + ParameterMode parameter_mode, + IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { + return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body, + increment, parameter_mode, advance_mode); + } - enum class ForEachDirection { kForward, kReverse }; - - using FastFixedArrayForEachBody = - std::function; - - void BuildFastFixedArrayForEach( - const CodeStubAssembler::VariableList& vars, Node* fixed_array, - ElementsKind kind, Node* first_element_inclusive, - Node* last_element_exclusive, const FastFixedArrayForEachBody& body, - ParameterMode mode = INTPTR_PARAMETERS, - ForEachDirection direction = ForEachDirection::kReverse); - - void BuildFastFixedArrayForEach( - Node* fixed_array, ElementsKind kind, Node* first_element_inclusive, - Node* last_element_exclusive, const FastFixedArrayForEachBody& body, - ParameterMode mode = INTPTR_PARAMETERS, - ForEachDirection direction = ForEachDirection::kReverse) { - CodeStubAssembler::VariableList list(0, zone()); - BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive, - last_element_exclusive, body, mode, direction); - } + enum class ForEachDirection { kForward, kReverse }; + + using FastFixedArrayForEachBody = + std::function; + + void BuildFastFixedArrayForEach( + const CodeStubAssembler::VariableList& vars, Node* fixed_array, + ElementsKind kind, Node* 
first_element_inclusive, + Node* last_element_exclusive, const FastFixedArrayForEachBody& body, + ParameterMode mode = INTPTR_PARAMETERS, + ForEachDirection direction = ForEachDirection::kReverse); + + void BuildFastFixedArrayForEach( + Node* fixed_array, ElementsKind kind, Node* first_element_inclusive, + Node* last_element_exclusive, const FastFixedArrayForEachBody& body, + ParameterMode mode = INTPTR_PARAMETERS, + ForEachDirection direction = ForEachDirection::kReverse) { + CodeStubAssembler::VariableList list(0, zone()); + BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive, + last_element_exclusive, body, mode, direction); + } - TNode GetArrayAllocationSize(Node* element_count, ElementsKind kind, - ParameterMode mode, int header_size) { - return ElementOffsetFromIndex(element_count, kind, mode, header_size); - } + TNode GetArrayAllocationSize(Node* element_count, ElementsKind kind, + ParameterMode mode, int header_size) { + return ElementOffsetFromIndex(element_count, kind, mode, header_size); + } - TNode GetFixedArrayAllocationSize(Node* element_count, - ElementsKind kind, - ParameterMode mode) { - return GetArrayAllocationSize(element_count, kind, mode, - FixedArray::kHeaderSize); - } + TNode GetFixedArrayAllocationSize(Node* element_count, + ElementsKind kind, + ParameterMode mode) { + return GetArrayAllocationSize(element_count, kind, mode, + FixedArray::kHeaderSize); + } - TNode GetPropertyArrayAllocationSize(Node* element_count, - ParameterMode mode) { - return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode, - PropertyArray::kHeaderSize); - } + TNode GetPropertyArrayAllocationSize(Node* element_count, + ParameterMode mode) { + return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode, + PropertyArray::kHeaderSize); + } - void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count, - Label* doesnt_fit, int base_size, - ParameterMode mode); + void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* 
element_count, + Label* doesnt_fit, int base_size, + ParameterMode mode); - void InitializeFieldsWithRoot(Node* object, Node* start_offset, - Node* end_offset, RootIndex root); + void InitializeFieldsWithRoot(Node* object, Node* start_offset, + Node* end_offset, RootIndex root); - Node* RelationalComparison(Operation op, Node* left, Node* right, - Node* context, - Variable* var_type_feedback = nullptr); + Node* RelationalComparison(Operation op, Node* left, Node* right, + Node* context, + Variable* var_type_feedback = nullptr); - void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right, - Label* if_true, Label* if_false); + void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right, + Label* if_true, Label* if_false); - void BranchIfNumberEqual(TNode left, TNode right, - Label* if_true, Label* if_false) { - BranchIfNumberRelationalComparison(Operation::kEqual, left, right, if_true, - if_false); - } + void BranchIfNumberEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberRelationalComparison(Operation::kEqual, left, right, if_true, + if_false); + } - void BranchIfNumberNotEqual(TNode left, TNode right, - Label* if_true, Label* if_false) { - BranchIfNumberEqual(left, right, if_false, if_true); - } + void BranchIfNumberNotEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberEqual(left, right, if_false, if_true); + } - void BranchIfNumberLessThan(TNode left, TNode right, - Label* if_true, Label* if_false) { - BranchIfNumberRelationalComparison(Operation::kLessThan, left, right, - if_true, if_false); - } + void BranchIfNumberLessThan(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberRelationalComparison(Operation::kLessThan, left, right, + if_true, if_false); + } - void BranchIfNumberLessThanOrEqual(TNode left, TNode right, - Label* if_true, Label* if_false) { - BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right, - 
if_true, if_false); - } + void BranchIfNumberLessThanOrEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right, + if_true, if_false); + } - void BranchIfNumberGreaterThan(TNode left, TNode right, - Label* if_true, Label* if_false) { - BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right, - if_true, if_false); - } + void BranchIfNumberGreaterThan(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right, + if_true, if_false); + } - void BranchIfNumberGreaterThanOrEqual(TNode left, TNode right, - Label* if_true, Label* if_false) { - BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left, - right, if_true, if_false); - } + void BranchIfNumberGreaterThanOrEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left, + right, if_true, if_false); + } - void BranchIfAccessorPair(Node* value, Label* if_accessor_pair, - Label* if_not_accessor_pair) { - GotoIf(TaggedIsSmi(value), if_not_accessor_pair); - Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair); - } + void BranchIfAccessorPair(Node* value, Label* if_accessor_pair, + Label* if_not_accessor_pair) { + GotoIf(TaggedIsSmi(value), if_not_accessor_pair); + Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair); + } - void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false); - - Node* Equal(Node* lhs, Node* rhs, Node* context, - Variable* var_type_feedback = nullptr); - - TNode StrictEqual(SloppyTNode lhs, SloppyTNode rhs, - Variable* var_type_feedback = nullptr); - - // ECMA#sec-samevalue - // Similar to StrictEqual except that NaNs are treated as equal and minus zero - // differs from positive zero. 
- enum class SameValueMode { kNumbersOnly, kFull }; - void BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true, Label* if_false, - SameValueMode mode = SameValueMode::kFull); - // A part of BranchIfSameValue() that handles two double values. - // Treats NaN == NaN and +0 != -0. - void BranchIfSameNumberValue(TNode lhs_value, - TNode rhs_value, Label* if_true, - Label* if_false); - - enum HasPropertyLookupMode { kHasProperty, kForInHasProperty }; - - TNode HasProperty(SloppyTNode context, - SloppyTNode object, - SloppyTNode key, - HasPropertyLookupMode mode); - - // Due to naming conflict with the builtin function namespace. - TNode HasProperty_Inline(TNode context, - TNode object, - TNode key) { - return HasProperty(context, object, key, - HasPropertyLookupMode::kHasProperty); - } + void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false); + + Node* Equal(Node* lhs, Node* rhs, Node* context, + Variable* var_type_feedback = nullptr); + + TNode StrictEqual(SloppyTNode lhs, SloppyTNode rhs, + Variable* var_type_feedback = nullptr); + + // ECMA#sec-samevalue + // Similar to StrictEqual except that NaNs are treated as equal and minus zero + // differs from positive zero. + enum class SameValueMode { kNumbersOnly, kFull }; + void BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true, Label* if_false, + SameValueMode mode = SameValueMode::kFull); + // A part of BranchIfSameValue() that handles two double values. + // Treats NaN == NaN and +0 != -0. + void BranchIfSameNumberValue(TNode lhs_value, + TNode rhs_value, Label* if_true, + Label* if_false); + + enum HasPropertyLookupMode { kHasProperty, kForInHasProperty }; + + TNode HasProperty(SloppyTNode context, + SloppyTNode object, + SloppyTNode key, + HasPropertyLookupMode mode); + + // Due to naming conflict with the builtin function namespace. 
+ TNode HasProperty_Inline(TNode context, + TNode object, + TNode key) { + return HasProperty(context, object, key, + HasPropertyLookupMode::kHasProperty); + } - Node* Typeof(Node* value); - - TNode GetSuperConstructor(SloppyTNode context, - SloppyTNode active_function); - - TNode SpeciesConstructor( - SloppyTNode context, SloppyTNode object, - SloppyTNode default_constructor); - - Node* InstanceOf(Node* object, Node* callable, Node* context); - - // Debug helpers - Node* IsDebugActive(); - - // JSArrayBuffer helpers - TNode LoadJSArrayBufferBitField(TNode array_buffer); - TNode LoadJSArrayBufferBackingStore( - TNode array_buffer); - TNode IsDetachedBuffer(TNode buffer); - void ThrowIfArrayBufferIsDetached(SloppyTNode context, - TNode array_buffer, - const char* method_name); - - // JSArrayBufferView helpers - TNode LoadJSArrayBufferViewBuffer( - TNode array_buffer_view); - TNode LoadJSArrayBufferViewByteLength( - TNode array_buffer_view); - TNode LoadJSArrayBufferViewByteOffset( - TNode array_buffer_view); - void ThrowIfArrayBufferViewBufferIsDetached( - SloppyTNode context, TNode array_buffer_view, - const char* method_name); - - // JSTypedArray helpers - TNode LoadJSTypedArrayLength(TNode typed_array); - TNode LoadJSTypedArrayBackingStore(TNode typed_array); - - TNode ElementOffsetFromIndex(Node* index, ElementsKind kind, - ParameterMode mode, int base_size = 0); - - // Check that a field offset is within the bounds of the an object. - TNode IsOffsetInBounds(SloppyTNode offset, - SloppyTNode length, int header_size, - ElementsKind kind = HOLEY_ELEMENTS); - - // Load a builtin's code from the builtin array in the isolate. - TNode LoadBuiltin(TNode builtin_id); - - // Figure out the SFI's code object using its data field. - // If |if_compile_lazy| is provided then the execution will go to the given - // label in case of an CompileLazy code object. 
- TNode GetSharedFunctionInfoCode( - SloppyTNode shared_info, - Label* if_compile_lazy = nullptr); - - Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info, - Node* context); - - // Promise helpers - Node* IsPromiseHookEnabled(); - Node* HasAsyncEventDelegate(); - Node* IsPromiseHookEnabledOrHasAsyncEventDelegate(); - Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(); - - // for..in helpers - void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, - Label* if_fast, Label* if_slow); - Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime); - - TNode GetArgumentValue(TorqueStructArguments args, - TNode index); - - TorqueStructArguments GetFrameArguments(TNode frame, - TNode argc); - - // Support for printf-style debugging - void Print(const char* s); - void Print(const char* prefix, Node* tagged_value); - inline void Print(SloppyTNode tagged_value) { - return Print(nullptr, tagged_value); - } - inline void Print(TNode tagged_value) { - return Print(nullptr, tagged_value); - } + Node* Typeof(Node* value); + + TNode GetSuperConstructor(SloppyTNode context, + SloppyTNode active_function); + + TNode SpeciesConstructor( + SloppyTNode context, SloppyTNode object, + SloppyTNode default_constructor); + + Node* InstanceOf(Node* object, Node* callable, Node* context); + + // Debug helpers + Node* IsDebugActive(); + + // JSArrayBuffer helpers + TNode LoadJSArrayBufferBitField(TNode array_buffer); + TNode LoadJSArrayBufferBackingStore( + TNode array_buffer); + TNode IsDetachedBuffer(TNode buffer); + void ThrowIfArrayBufferIsDetached(SloppyTNode context, + TNode array_buffer, + const char* method_name); + + // JSArrayBufferView helpers + TNode LoadJSArrayBufferViewBuffer( + TNode array_buffer_view); + TNode LoadJSArrayBufferViewByteLength( + TNode array_buffer_view); + TNode LoadJSArrayBufferViewByteOffset( + TNode array_buffer_view); + void ThrowIfArrayBufferViewBufferIsDetached( + SloppyTNode context, TNode 
array_buffer_view, + const char* method_name); + + // JSTypedArray helpers + TNode LoadJSTypedArrayLength(TNode typed_array); + TNode LoadJSTypedArrayBackingStore(TNode typed_array); + + TNode ElementOffsetFromIndex(Node* index, ElementsKind kind, + ParameterMode mode, int base_size = 0); + + // Check that a field offset is within the bounds of the an object. + TNode IsOffsetInBounds(SloppyTNode offset, + SloppyTNode length, int header_size, + ElementsKind kind = HOLEY_ELEMENTS); + + // Load a builtin's code from the builtin array in the isolate. + TNode LoadBuiltin(TNode builtin_id); + + // Figure out the SFI's code object using its data field. + // If |if_compile_lazy| is provided then the execution will go to the given + // label in case of an CompileLazy code object. + TNode GetSharedFunctionInfoCode( + SloppyTNode shared_info, + Label* if_compile_lazy = nullptr); + + Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info, + Node* context); + + // Promise helpers + Node* IsPromiseHookEnabled(); + Node* HasAsyncEventDelegate(); + Node* IsPromiseHookEnabledOrHasAsyncEventDelegate(); + Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(); + + // for..in helpers + void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, + Label* if_fast, Label* if_slow); + Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime); + + TNode GetArgumentValue(TorqueStructArguments args, + TNode index); + + TorqueStructArguments GetFrameArguments(TNode frame, + TNode argc); + + // Support for printf-style debugging + void Print(const char* s); + void Print(const char* prefix, Node* tagged_value); + inline void Print(SloppyTNode tagged_value) { + return Print(nullptr, tagged_value); + } + inline void Print(TNode tagged_value) { + return Print(nullptr, tagged_value); + } - template - Node* MakeTypeError(MessageTemplate message, Node* context, TArgs... 
args) { - STATIC_ASSERT(sizeof...(TArgs) <= 3); - Node* const make_type_error = LoadContextElement( - LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX); - return CallJS(CodeFactory::Call(isolate()), context, make_type_error, - UndefinedConstant(), SmiConstant(message), args...); - } + template + Node* MakeTypeError(MessageTemplate message, Node* context, TArgs... args) { + STATIC_ASSERT(sizeof...(TArgs) <= 3); + Node* const make_type_error = LoadContextElement( + LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX); + return CallJS(CodeFactory::Call(isolate()), context, make_type_error, + UndefinedConstant(), SmiConstant(message), args...); + } - void Abort(AbortReason reason) { - CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason)); - Unreachable(); - } + void Abort(AbortReason reason) { + CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason)); + Unreachable(); + } - bool ConstexprBoolNot(bool value) { return !value; } + bool ConstexprBoolNot(bool value) { + return !value; + } - bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; } - bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; } - uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; } - int31_t ConstexprInt31Add(int31_t a, int31_t b) { - int32_t val; - CHECK(!base::bits::SignedAddOverflow32(a, b, &val)); - return val; - } - int31_t ConstexprInt31Mul(int31_t a, int31_t b) { - int32_t val; - CHECK(!base::bits::SignedMulOverflow32(a, b, &val)); - return val; - } + bool ConstexprInt31Equal(int31_t a, int31_t b) { + return a == b; + } + bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { + return a >= b; + } + uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { + return a + b; + } + int31_t ConstexprInt31Add(int31_t a, int31_t b) { + int32_t val; + CHECK(!base::bits::SignedAddOverflow32(a, b, &val)); + return val; + } + int31_t ConstexprInt31Mul(int31_t a, int31_t b) { + int32_t val; + 
CHECK(!base::bits::SignedMulOverflow32(a, b, &val)); + return val; + } - void PerformStackCheck(TNode context); - - void SetPropertyLength(TNode context, TNode array, - TNode length); - - // Checks that {object_map}'s prototype map is the {initial_prototype_map} and - // makes sure that the field with name at index {descriptor} is still - // constant. If it is not, go to label {if_modified}. - // - // To make the checks robust, the method also asserts that the descriptor has - // the right key, the caller must pass the root index of the key - // in {field_name_root_index}. - // - // This is useful for checking that given function has not been patched - // on the prototype. - void GotoIfInitialPrototypePropertyModified(TNode object_map, - TNode initial_prototype_map, - int descfriptor, - RootIndex field_name_root_index, - Label* if_modified); - struct DescriptorIndexAndName { - DescriptorIndexAndName() {} - DescriptorIndexAndName(int descriptor_index, RootIndex name_root_index) - : descriptor_index(descriptor_index), - name_root_index(name_root_index) {} - - int descriptor_index; - RootIndex name_root_index; - }; - void GotoIfInitialPrototypePropertiesModified( - TNode object_map, TNode initial_prototype_map, - Vector properties, Label* if_modified); - - // Implements DescriptorArray::Search(). - void DescriptorLookup(SloppyTNode unique_name, - SloppyTNode descriptors, - SloppyTNode bitfield3, Label* if_found, - TVariable* var_name_index, - Label* if_not_found); - - // Implements TransitionArray::SearchName() - searches for first transition - // entry with given name (note that there could be multiple entries with - // the same name). - void TransitionLookup(SloppyTNode unique_name, - SloppyTNode transitions, - Label* if_found, TVariable* var_name_index, - Label* if_not_found); - - // Implements generic search procedure like i::Search(). 
- template - void Lookup(TNode unique_name, TNode array, - TNode number_of_valid_entries, Label* if_found, - TVariable* var_name_index, Label* if_not_found); - - // Implements generic linear search procedure like i::LinearSearch(). - template - void LookupLinear(TNode unique_name, TNode array, - TNode number_of_valid_entries, Label* if_found, - TVariable* var_name_index, Label* if_not_found); - - // Implements generic binary search procedure like i::BinarySearch(). - template - void LookupBinary(TNode unique_name, TNode array, - TNode number_of_valid_entries, Label* if_found, - TVariable* var_name_index, Label* if_not_found); - - // Converts [Descriptor/Transition]Array entry number to a fixed array index. - template - TNode EntryIndexToIndex(TNode entry_index); - - // Implements [Descriptor/Transition]Array::ToKeyIndex. - template - TNode ToKeyIndex(TNode entry_index); - - // Implements [Descriptor/Transition]Array::GetKey. - template - TNode GetKey(TNode array, TNode entry_index); - - // Implements DescriptorArray::GetDetails. - TNode DescriptorArrayGetDetails(TNode descriptors, - TNode descriptor_number); - - using ForEachDescriptorBodyFunction = - std::function descriptor_key_index)>; - - // Descriptor array accessors based on key_index, which is equal to - // DescriptorArray::ToKeyIndex(descriptor). - TNode LoadKeyByKeyIndex(TNode container, - TNode key_index); - TNode LoadDetailsByKeyIndex(TNode container, - TNode key_index); - TNode LoadValueByKeyIndex(TNode container, - TNode key_index); - TNode LoadFieldTypeByKeyIndex(TNode container, - TNode key_index); - - TNode DescriptorEntryToIndex(TNode descriptor); - - // Descriptor array accessors based on descriptor. 
- TNode LoadKeyByDescriptorEntry(TNode descriptors, - TNode descriptor); - TNode LoadKeyByDescriptorEntry(TNode descriptors, - int descriptor); - TNode LoadDetailsByDescriptorEntry( - TNode descriptors, TNode descriptor); - TNode LoadDetailsByDescriptorEntry( - TNode descriptors, int descriptor); - TNode LoadValueByDescriptorEntry(TNode descriptors, - int descriptor); - TNode LoadFieldTypeByDescriptorEntry( - TNode descriptors, TNode descriptor); - - using ForEachKeyValueFunction = - std::function key, TNode value)>; - - enum ForEachEnumerationMode { - // String and then Symbol properties according to the spec - // ES#sec-object.assign - kEnumerationOrder, - // Order of property addition - kPropertyAdditionOrder, - }; - - // For each JSObject property (in DescriptorArray order), check if the key is - // enumerable, and if so, load the value from the receiver and evaluate the - // closure. - void ForEachEnumerableOwnProperty(TNode context, TNode map, - TNode object, - ForEachEnumerationMode mode, - const ForEachKeyValueFunction& body, - Label* bailout); - - TNode CallGetterIfAccessor(Node* value, Node* details, Node* context, - Node* receiver, Label* if_bailout, - GetOwnPropertyMode mode = kCallJSGetter); - - TNode TryToIntptr(Node* key, Label* miss); - - void BranchIfPrototypesHaveNoElements(Node* receiver_map, - Label* definitely_no_elements, - Label* possibly_elements); - - void InitializeFunctionContext(Node* native_context, Node* context, - int slots); - - TNode ArrayCreate(TNode context, TNode length); - - // Allocate a clone of a mutable primitive, if {object} is a - // MutableHeapNumber. 
- TNode CloneIfMutablePrimitive(TNode object); - - private: - friend class CodeStubArguments; - - void HandleBreakOnNode(); - - TNode AllocateRawDoubleAligned(TNode size_in_bytes, - AllocationFlags flags, - TNode top_address, - TNode limit_address); - TNode AllocateRawUnaligned(TNode size_in_bytes, - AllocationFlags flags, - TNode top_address, - TNode limit_address); - TNode AllocateRaw(TNode size_in_bytes, - AllocationFlags flags, - TNode top_address, - TNode limit_address); - - // Allocate and return a JSArray of given total size in bytes with header - // fields initialized. - TNode AllocateUninitializedJSArray(TNode array_map, - TNode length, - Node* allocation_site, - TNode size_in_bytes); - - TNode IsValidSmi(TNode smi); - Node* SmiShiftBitsConstant(); - - // Emits keyed sloppy arguments load if the |value| is nullptr or store - // otherwise. Returns either the loaded value or |value|. - Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value, - Label* bailout, - ArgumentsAccessMode access_mode); - - TNode AllocateSlicedString(RootIndex map_root_index, - TNode length, - TNode parent, TNode offset); - - // Allocate a MutableHeapNumber without initializing its value. - TNode AllocateMutableHeapNumber(); - - Node* SelectImpl(TNode condition, const NodeGenerator& true_body, - const NodeGenerator& false_body, MachineRepresentation rep); - - // Implements [Descriptor/Transition]Array::number_of_entries. - template - TNode NumberOfEntries(TNode array); - - // Implements [Descriptor/Transition]Array::GetSortedKeyIndex. 
- template - TNode GetSortedKeyIndex(TNode descriptors, - TNode entry_index); - - TNode CollectFeedbackForString(SloppyTNode instance_type); - void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal, - Variable* var_type_feedback = nullptr); - TNode AllocAndCopyStringCharacters(Node* from, - Node* from_instance_type, - TNode from_index, - TNode character_count); - - static const int kElementLoopUnrollThreshold = 8; - - // {convert_bigint} is only meaningful when {mode} == kToNumber. - Node* NonNumberToNumberOrNumeric( - Node* context, Node* input, Object::Conversion mode, - BigIntHandling bigint_handling = BigIntHandling::kThrow); - - void TaggedToNumeric(Node* context, Node* value, Label* done, - Variable* var_numeric, Variable* var_feedback); - - template - void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number, - Variable* var_word32, - Label* if_bigint = nullptr, - Variable* var_bigint = nullptr, - Variable* var_feedback = nullptr); - - private: - // Low-level accessors for Descriptor arrays. - template - TNode LoadDescriptorArrayElement(TNode object, - TNode index, - int additional_offset); + void PerformStackCheck(TNode context); + + void SetPropertyLength(TNode context, TNode array, + TNode length); + + // Checks that {object_map}'s prototype map is the {initial_prototype_map} and + // makes sure that the field with name at index {descriptor} is still + // constant. If it is not, go to label {if_modified}. + // + // To make the checks robust, the method also asserts that the descriptor has + // the right key, the caller must pass the root index of the key + // in {field_name_root_index}. + // + // This is useful for checking that given function has not been patched + // on the prototype. 
+ void GotoIfInitialPrototypePropertyModified(TNode object_map, + TNode initial_prototype_map, + int descfriptor, + RootIndex field_name_root_index, + Label* if_modified); + struct DescriptorIndexAndName { + DescriptorIndexAndName() {} + DescriptorIndexAndName(int descriptor_index, RootIndex name_root_index) + : descriptor_index(descriptor_index), + name_root_index(name_root_index) {} + + int descriptor_index; + RootIndex name_root_index; + }; + void GotoIfInitialPrototypePropertiesModified( + TNode object_map, TNode initial_prototype_map, + Vector properties, Label* if_modified); + + // Implements DescriptorArray::Search(). + void DescriptorLookup(SloppyTNode unique_name, + SloppyTNode descriptors, + SloppyTNode bitfield3, Label* if_found, + TVariable* var_name_index, + Label* if_not_found); + + // Implements TransitionArray::SearchName() - searches for first transition + // entry with given name (note that there could be multiple entries with + // the same name). + void TransitionLookup(SloppyTNode unique_name, + SloppyTNode transitions, + Label* if_found, TVariable* var_name_index, + Label* if_not_found); + + // Implements generic search procedure like i::Search(). + template + void Lookup(TNode unique_name, TNode array, + TNode number_of_valid_entries, Label* if_found, + TVariable* var_name_index, Label* if_not_found); + + // Implements generic linear search procedure like i::LinearSearch(). + template + void LookupLinear(TNode unique_name, TNode array, + TNode number_of_valid_entries, Label* if_found, + TVariable* var_name_index, Label* if_not_found); + + // Implements generic binary search procedure like i::BinarySearch(). + template + void LookupBinary(TNode unique_name, TNode array, + TNode number_of_valid_entries, Label* if_found, + TVariable* var_name_index, Label* if_not_found); + + // Converts [Descriptor/Transition]Array entry number to a fixed array index. 
+ template + TNode EntryIndexToIndex(TNode entry_index); + + // Implements [Descriptor/Transition]Array::ToKeyIndex. + template + TNode ToKeyIndex(TNode entry_index); + + // Implements [Descriptor/Transition]Array::GetKey. + template + TNode GetKey(TNode array, TNode entry_index); + + // Implements DescriptorArray::GetDetails. + TNode DescriptorArrayGetDetails(TNode descriptors, + TNode descriptor_number); + + using ForEachDescriptorBodyFunction = + std::function descriptor_key_index)>; + + // Descriptor array accessors based on key_index, which is equal to + // DescriptorArray::ToKeyIndex(descriptor). + TNode LoadKeyByKeyIndex(TNode container, + TNode key_index); + TNode LoadDetailsByKeyIndex(TNode container, + TNode key_index); + TNode LoadValueByKeyIndex(TNode container, + TNode key_index); + TNode LoadFieldTypeByKeyIndex(TNode container, + TNode key_index); + + TNode DescriptorEntryToIndex(TNode descriptor); + + // Descriptor array accessors based on descriptor. + TNode LoadKeyByDescriptorEntry(TNode descriptors, + TNode descriptor); + TNode LoadKeyByDescriptorEntry(TNode descriptors, + int descriptor); + TNode LoadDetailsByDescriptorEntry( + TNode descriptors, TNode descriptor); + TNode LoadDetailsByDescriptorEntry( + TNode descriptors, int descriptor); + TNode LoadValueByDescriptorEntry(TNode descriptors, + int descriptor); + TNode LoadFieldTypeByDescriptorEntry( + TNode descriptors, TNode descriptor); + + using ForEachKeyValueFunction = + std::function key, TNode value)>; + + enum ForEachEnumerationMode { + // String and then Symbol properties according to the spec + // ES#sec-object.assign + kEnumerationOrder, + // Order of property addition + kPropertyAdditionOrder, + }; + + // For each JSObject property (in DescriptorArray order), check if the key is + // enumerable, and if so, load the value from the receiver and evaluate the + // closure. 
+ void ForEachEnumerableOwnProperty(TNode context, TNode map, + TNode object, + ForEachEnumerationMode mode, + const ForEachKeyValueFunction& body, + Label* bailout); + + TNode CallGetterIfAccessor(Node* value, Node* details, Node* context, + Node* receiver, Label* if_bailout, + GetOwnPropertyMode mode = kCallJSGetter); + + TNode TryToIntptr(Node* key, Label* miss); + + void BranchIfPrototypesHaveNoElements(Node* receiver_map, + Label* definitely_no_elements, + Label* possibly_elements); + + void InitializeFunctionContext(Node* native_context, Node* context, + int slots); + + TNode ArrayCreate(TNode context, TNode length); + + // Allocate a clone of a mutable primitive, if {object} is a + // MutableHeapNumber. + TNode CloneIfMutablePrimitive(TNode object); + +private: + friend class CodeStubArguments; + + void HandleBreakOnNode(); + + TNode AllocateRawDoubleAligned(TNode size_in_bytes, + AllocationFlags flags, + TNode top_address, + TNode limit_address); + TNode AllocateRawUnaligned(TNode size_in_bytes, + AllocationFlags flags, + TNode top_address, + TNode limit_address); + TNode AllocateRaw(TNode size_in_bytes, + AllocationFlags flags, + TNode top_address, + TNode limit_address); + + // Allocate and return a JSArray of given total size in bytes with header + // fields initialized. + TNode AllocateUninitializedJSArray(TNode array_map, + TNode length, + Node* allocation_site, + TNode size_in_bytes); + + TNode IsValidSmi(TNode smi); + Node* SmiShiftBitsConstant(); + + // Emits keyed sloppy arguments load if the |value| is nullptr or store + // otherwise. Returns either the loaded value or |value|. + Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value, + Label* bailout, + ArgumentsAccessMode access_mode); + + TNode AllocateSlicedString(RootIndex map_root_index, + TNode length, + TNode parent, TNode offset); + + // Allocate a MutableHeapNumber without initializing its value. 
+ TNode AllocateMutableHeapNumber(); + + Node* SelectImpl(TNode condition, const NodeGenerator& true_body, + const NodeGenerator& false_body, MachineRepresentation rep); + + // Implements [Descriptor/Transition]Array::number_of_entries. + template + TNode NumberOfEntries(TNode array); + + // Implements [Descriptor/Transition]Array::GetSortedKeyIndex. + template + TNode GetSortedKeyIndex(TNode descriptors, + TNode entry_index); + + TNode CollectFeedbackForString(SloppyTNode instance_type); + void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal, + Variable* var_type_feedback = nullptr); + TNode AllocAndCopyStringCharacters(Node* from, + Node* from_instance_type, + TNode from_index, + TNode character_count); + + static const int kElementLoopUnrollThreshold = 8; + + // {convert_bigint} is only meaningful when {mode} == kToNumber. + Node* NonNumberToNumberOrNumeric( + Node* context, Node* input, Object::Conversion mode, + BigIntHandling bigint_handling = BigIntHandling::kThrow); + + void TaggedToNumeric(Node* context, Node* value, Label* done, + Variable* var_numeric, Variable* var_feedback); + + template + void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number, + Variable* var_word32, + Label* if_bigint = nullptr, + Variable* var_bigint = nullptr, + Variable* var_feedback = nullptr); + +private: + // Low-level accessors for Descriptor arrays. + template + TNode LoadDescriptorArrayElement(TNode object, + TNode index, + int additional_offset); }; class V8_EXPORT_PRIVATE CodeStubArguments { - public: - using Node = compiler::Node; - template - using TNode = compiler::TNode; - template - using SloppyTNode = compiler::SloppyTNode; - enum ReceiverMode { kHasReceiver, kNoReceiver }; - - // |argc| is an intptr value which specifies the number of arguments passed - // to the builtin excluding the receiver. The arguments will include a - // receiver iff |receiver_mode| is kHasReceiver. 
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, argc, nullptr, - CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) { - } +public: + using Node = compiler::Node; + template + using TNode = compiler::TNode; + template + using SloppyTNode = compiler::SloppyTNode; + enum ReceiverMode { kHasReceiver, kNoReceiver }; + + // |argc| is an intptr value which specifies the number of arguments passed + // to the builtin excluding the receiver. The arguments will include a + // receiver iff |receiver_mode| is kHasReceiver. + CodeStubArguments(CodeStubAssembler* assembler, Node* argc, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, argc, nullptr, + CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) { + } - // |argc| is either a smi or intptr depending on |param_mode|. The arguments - // include a receiver iff |receiver_mode| is kHasReceiver. - CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp, - CodeStubAssembler::ParameterMode param_mode, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver); - - // Used by Torque to construct arguments based on a Torque-defined - // struct of values. - CodeStubArguments(CodeStubAssembler* assembler, - TorqueStructArguments torque_arguments) - : assembler_(assembler), - argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS), - receiver_mode_(ReceiverMode::kHasReceiver), - argc_(torque_arguments.length), - base_(torque_arguments.base), - fp_(torque_arguments.frame) {} - - TNode GetReceiver() const; - // Replaces receiver argument on the expression stack. Should be used only - // for manipulating arguments in trampoline builtins before tail calling - // further with passing all the JS arguments as is. - void SetReceiver(TNode object) const; - - // Computes address of the index'th argument. 
- TNode AtIndexPtr(Node* index, + // |argc| is either a smi or intptr depending on |param_mode|. The arguments + // include a receiver iff |receiver_mode| is kHasReceiver. + CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp, + CodeStubAssembler::ParameterMode param_mode, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver); + + // Used by Torque to construct arguments based on a Torque-defined + // struct of values. + CodeStubArguments(CodeStubAssembler* assembler, + TorqueStructArguments torque_arguments) + : assembler_(assembler), + argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS), + receiver_mode_(ReceiverMode::kHasReceiver), + argc_(torque_arguments.length), + base_(torque_arguments.base), + fp_(torque_arguments.frame) {} + + TNode GetReceiver() const; + // Replaces receiver argument on the expression stack. Should be used only + // for manipulating arguments in trampoline builtins before tail calling + // further with passing all the JS arguments as is. + void SetReceiver(TNode object) const; + + // Computes address of the index'th argument. 
+ TNode AtIndexPtr(Node* index, + CodeStubAssembler::ParameterMode mode = + CodeStubAssembler::INTPTR_PARAMETERS) const; + + // |index| is zero-based and does not include the receiver + TNode AtIndex(Node* index, CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS) const; - // |index| is zero-based and does not include the receiver - TNode AtIndex(Node* index, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) const; - - TNode AtIndex(int index) const; + TNode AtIndex(int index) const; - TNode GetOptionalArgumentValue(int index) { - return GetOptionalArgumentValue(index, assembler_->UndefinedConstant()); - } - TNode GetOptionalArgumentValue(int index, - TNode default_value); + TNode GetOptionalArgumentValue(int index) { + return GetOptionalArgumentValue(index, assembler_->UndefinedConstant()); + } + TNode GetOptionalArgumentValue(int index, + TNode default_value); - Node* GetLength(CodeStubAssembler::ParameterMode mode) const { - DCHECK_EQ(mode, argc_mode_); - return argc_; - } + Node* GetLength(CodeStubAssembler::ParameterMode mode) const { + DCHECK_EQ(mode, argc_mode_); + return argc_; + } - TorqueStructArguments GetTorqueArguments() const { - DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); - return TorqueStructArguments{assembler_->UncheckedCast(fp_), base_, - assembler_->UncheckedCast(argc_)}; - } + TorqueStructArguments GetTorqueArguments() const { + DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); + return TorqueStructArguments{assembler_->UncheckedCast(fp_), base_, + assembler_->UncheckedCast(argc_)}; + } - TNode GetOptionalArgumentValue(TNode index) { - return GetOptionalArgumentValue(index, assembler_->UndefinedConstant()); - } - TNode GetOptionalArgumentValue(TNode index, - TNode default_value); - TNode GetLength() const { - DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); - return assembler_->UncheckedCast(argc_); - } + TNode GetOptionalArgumentValue(TNode index) 
{ + return GetOptionalArgumentValue(index, assembler_->UndefinedConstant()); + } + TNode GetOptionalArgumentValue(TNode index, + TNode default_value); + TNode GetLength() const { + DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); + return assembler_->UncheckedCast(argc_); + } - using ForEachBodyFunction = std::function; + using ForEachBodyFunction = std::function; - // Iteration doesn't include the receiver. |first| and |last| are zero-based. - void ForEach(const ForEachBodyFunction& body, Node* first = nullptr, - Node* last = nullptr, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) { - CodeStubAssembler::VariableList list(0, assembler_->zone()); - ForEach(list, body, first, last); - } + // Iteration doesn't include the receiver. |first| and |last| are zero-based. + void ForEach(const ForEachBodyFunction& body, Node* first = nullptr, + Node* last = nullptr, + CodeStubAssembler::ParameterMode mode = + CodeStubAssembler::INTPTR_PARAMETERS) { + CodeStubAssembler::VariableList list(0, assembler_->zone()); + ForEach(list, body, first, last); + } - // Iteration doesn't include the receiver. |first| and |last| are zero-based. - void ForEach(const CodeStubAssembler::VariableList& vars, - const ForEachBodyFunction& body, Node* first = nullptr, - Node* last = nullptr, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS); + // Iteration doesn't include the receiver. |first| and |last| are zero-based. 
+ void ForEach(const CodeStubAssembler::VariableList& vars, + const ForEachBodyFunction& body, Node* first = nullptr, + Node* last = nullptr, + CodeStubAssembler::ParameterMode mode = + CodeStubAssembler::INTPTR_PARAMETERS); - void PopAndReturn(Node* value); + void PopAndReturn(Node* value); - private: - Node* GetArguments(); +private: + Node* GetArguments(); - CodeStubAssembler* assembler_; - CodeStubAssembler::ParameterMode argc_mode_; - ReceiverMode receiver_mode_; - Node* argc_; - TNode base_; - Node* fp_; + CodeStubAssembler* assembler_; + CodeStubAssembler::ParameterMode argc_mode_; + ReceiverMode receiver_mode_; + Node* argc_; + TNode base_; + Node* fp_; }; class ToDirectStringAssembler : public CodeStubAssembler { - private: - enum StringPointerKind { PTR_TO_DATA, PTR_TO_STRING }; - - public: - enum Flag { - kDontUnpackSlicedStrings = 1 << 0, - }; - using Flags = base::Flags; - - ToDirectStringAssembler(compiler::CodeAssemblerState* state, Node* string, - Flags flags = Flags()); - - // Converts flat cons, thin, and sliced strings and returns the direct - // string. The result can be either a sequential or external string. - // Jumps to if_bailout if the string if the string is indirect and cannot - // be unpacked. - TNode TryToDirect(Label* if_bailout); - - // Returns a pointer to the beginning of the string data. - // Jumps to if_bailout if the external string cannot be unpacked. - TNode PointerToData(Label* if_bailout) { - return TryToSequential(PTR_TO_DATA, if_bailout); - } +private: + enum StringPointerKind { PTR_TO_DATA, PTR_TO_STRING }; + +public: + enum Flag { + kDontUnpackSlicedStrings = 1 << 0, + }; + using Flags = base::Flags; + + ToDirectStringAssembler(compiler::CodeAssemblerState* state, Node* string, + Flags flags = Flags()); + + // Converts flat cons, thin, and sliced strings and returns the direct + // string. The result can be either a sequential or external string. 
+ // Jumps to if_bailout if the string if the string is indirect and cannot + // be unpacked. + TNode TryToDirect(Label* if_bailout); + + // Returns a pointer to the beginning of the string data. + // Jumps to if_bailout if the external string cannot be unpacked. + TNode PointerToData(Label* if_bailout) { + return TryToSequential(PTR_TO_DATA, if_bailout); + } - // Returns a pointer that, offset-wise, looks like a String. - // Jumps to if_bailout if the external string cannot be unpacked. - TNode PointerToString(Label* if_bailout) { - return TryToSequential(PTR_TO_STRING, if_bailout); - } + // Returns a pointer that, offset-wise, looks like a String. + // Jumps to if_bailout if the external string cannot be unpacked. + TNode PointerToString(Label* if_bailout) { + return TryToSequential(PTR_TO_STRING, if_bailout); + } - Node* string() { return var_string_.value(); } - Node* instance_type() { return var_instance_type_.value(); } - TNode offset() { - return UncheckedCast(var_offset_.value()); - } - Node* is_external() { return var_is_external_.value(); } + Node* string() { + return var_string_.value(); + } + Node* instance_type() { + return var_instance_type_.value(); + } + TNode offset() { + return UncheckedCast(var_offset_.value()); + } + Node* is_external() { + return var_is_external_.value(); + } - private: - TNode TryToSequential(StringPointerKind ptr_kind, Label* if_bailout); +private: + TNode TryToSequential(StringPointerKind ptr_kind, Label* if_bailout); - Variable var_string_; - Variable var_instance_type_; - Variable var_offset_; - Variable var_is_external_; + Variable var_string_; + Variable var_instance_type_; + Variable var_offset_; + Variable var_is_external_; - const Flags flags_; + const Flags flags_; }; DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags) diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py index 5cf5a308c5e6a0..84b33caa370ed5 100755 --- 
a/deps/v8/third_party/inspector_protocol/code_generator.py +++ b/deps/v8/third_party/inspector_protocol/code_generator.py @@ -2,27 +2,28 @@ # Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. - -import os.path -import sys import argparse import collections +import copy import functools +import os.path import re -import copy +import sys + try: - import json + import json except ImportError: - import simplejson as json + import simplejson as json import pdl try: - unicode + unicode except NameError: - # Define unicode for Py3 - def unicode(s, *_): - return s + # Define unicode for Py3 + def unicode(s, *_): + return s + # Path handling for libraries and templates # Paths have to be normalized because Jinja uses the exact template path to @@ -34,699 +35,793 @@ def unicode(s, *_): # since some compile processes will try to read the partially written cache. module_path, module_filename = os.path.split(os.path.realpath(__file__)) + def read_config(): - # pylint: disable=W0703 - def json_to_object(data, output_base, config_base): - def json_object_hook(object_dict): - items = [(k, os.path.join(config_base, v) if k == "path" else v) - for (k, v) in object_dict.items()] - items = [(k, os.path.join(output_base, v) if k == "output" else v) - for (k, v) in items] - keys, values = list(zip(*items)) - # 'async' is a keyword since Python 3.7. - # Avoid namedtuple(rename=True) for compatibility with Python 2.X. - keys = tuple('async_' if k == 'async' else k for k in keys) - return collections.namedtuple('X', keys)(*values) - return json.loads(data, object_hook=json_object_hook) - - def init_defaults(config_tuple, path, defaults): - keys = list(config_tuple._fields) # pylint: disable=E1101 - values = [getattr(config_tuple, k) for k in keys] - for i in range(len(keys)): - if hasattr(values[i], "_fields"): - values[i] = init_defaults(values[i], path + "." 
+ keys[i], defaults) - for optional in defaults: - if optional.find(path + ".") != 0: - continue - optional_key = optional[len(path) + 1:] - if optional_key.find(".") == -1 and optional_key not in keys: - keys.append(optional_key) - values.append(defaults[optional]) - return collections.namedtuple('X', keys)(*values) - - try: - cmdline_parser = argparse.ArgumentParser() - cmdline_parser.add_argument("--output_base", type=unicode, required=True) - cmdline_parser.add_argument("--jinja_dir", type=unicode, required=True) - cmdline_parser.add_argument("--config", type=unicode, required=True) - cmdline_parser.add_argument("--config_value", default=[], action="append") - cmdline_parser.add_argument( - "--inspector_protocol_dir", type=unicode, required=True, - help=("directory with code_generator.py and C++ encoding / binding " - "libraries, relative to the root of the source tree.")) - arg_options = cmdline_parser.parse_args() - jinja_dir = arg_options.jinja_dir - output_base = arg_options.output_base - config_file = arg_options.config - config_base = os.path.dirname(config_file) - config_values = arg_options.config_value - inspector_protocol_dir = arg_options.inspector_protocol_dir.lstrip('/') - except Exception: - # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html - exc = sys.exc_info()[1] - sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc) - exit(1) - - try: - config_json_file = open(config_file, "r") - config_json_string = config_json_file.read() - config_partial = json_to_object(config_json_string, output_base, - config_base) - config_json_file.close() - defaults = { - ".use_snake_file_names": False, - ".use_title_case_methods": False, - ".imported": False, - ".imported.export_macro": "", - ".imported.export_header": False, - ".imported.header": False, - ".imported.package": False, - ".imported.options": False, - ".protocol.export_macro": "", - ".protocol.export_header": False, - ".protocol.options": False, - 
".protocol.file_name_prefix": "", - ".exported": False, - ".exported.export_macro": "", - ".exported.export_header": False, - ".lib": False, - ".lib.export_macro": "", - ".lib.export_header": False, - # The encoding lib consists of encoding/encoding.h and - # encoding/encoding.cc in its subdirectory, which binaries - # must link / depend on. - ".encoding_lib.header": os.path.join(inspector_protocol_dir, - "encoding/encoding.h"), - ".encoding_lib.namespace": "", - # Ditto for bindings, see bindings/bindings.h. - ".bindings_lib.header": os.path.join(inspector_protocol_dir, - "bindings/bindings.h"), - ".bindings_lib.namespace": "" - } - for key_value in config_values: - parts = key_value.split("=") - if len(parts) == 2: - defaults["." + parts[0]] = parts[1] - return (jinja_dir, config_file, init_defaults(config_partial, "", defaults)) - except Exception: - # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html - exc = sys.exc_info()[1] - sys.stderr.write("Failed to parse config file: %s\n\n" % exc) - exit(1) + # pylint: disable=W0703 + def json_to_object(data, output_base, config_base): + def json_object_hook(object_dict): + items = [(k, os.path.join(config_base, v) if k == "path" else v) + for (k, v) in object_dict.items()] + items = [(k, os.path.join(output_base, v) if k == "output" else v) + for (k, v) in items] + keys, values = list(zip(*items)) + # 'async' is a keyword since Python 3.7. + # Avoid namedtuple(rename=True) for compatibility with Python 2.X. + keys = tuple("async_" if k == "async" else k for k in keys) + return collections.namedtuple("X", keys)(*values) + + return json.loads(data, object_hook=json_object_hook) + + def init_defaults(config_tuple, path, defaults): + keys = list(config_tuple._fields) # pylint: disable=E1101 + values = [getattr(config_tuple, k) for k in keys] + for i in range(len(keys)): + if hasattr(values[i], "_fields"): + values[i] = init_defaults(values[i], path + "." 
+ keys[i], + defaults) + for optional in defaults: + if optional.find(path + ".") != 0: + continue + optional_key = optional[len(path) + 1:] + if optional_key.find(".") == -1 and optional_key not in keys: + keys.append(optional_key) + values.append(defaults[optional]) + return collections.namedtuple("X", keys)(*values) + + try: + cmdline_parser = argparse.ArgumentParser() + cmdline_parser.add_argument("--output_base", + type=unicode, + required=True) + cmdline_parser.add_argument("--jinja_dir", type=unicode, required=True) + cmdline_parser.add_argument("--config", type=unicode, required=True) + cmdline_parser.add_argument("--config_value", + default=[], + action="append") + cmdline_parser.add_argument( + "--inspector_protocol_dir", + type=unicode, + required=True, + help=( + "directory with code_generator.py and C++ encoding / binding " + "libraries, relative to the root of the source tree."), + ) + arg_options = cmdline_parser.parse_args() + jinja_dir = arg_options.jinja_dir + output_base = arg_options.output_base + config_file = arg_options.config + config_base = os.path.dirname(config_file) + config_values = arg_options.config_value + inspector_protocol_dir = arg_options.inspector_protocol_dir.lstrip("/") + except Exception: + # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html + exc = sys.exc_info()[1] + sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % + exc) + exit(1) + + try: + config_json_file = open(config_file, "r") + config_json_string = config_json_file.read() + config_partial = json_to_object(config_json_string, output_base, + config_base) + config_json_file.close() + defaults = { + ".use_snake_file_names": + False, + ".use_title_case_methods": + False, + ".imported": + False, + ".imported.export_macro": + "", + ".imported.export_header": + False, + ".imported.header": + False, + ".imported.package": + False, + ".imported.options": + False, + ".protocol.export_macro": + "", + ".protocol.export_header": + False, 
+ ".protocol.options": + False, + ".protocol.file_name_prefix": + "", + ".exported": + False, + ".exported.export_macro": + "", + ".exported.export_header": + False, + ".lib": + False, + ".lib.export_macro": + "", + ".lib.export_header": + False, + # The encoding lib consists of encoding/encoding.h and + # encoding/encoding.cc in its subdirectory, which binaries + # must link / depend on. + ".encoding_lib.header": + os.path.join(inspector_protocol_dir, "encoding/encoding.h"), + ".encoding_lib.namespace": + "", + # Ditto for bindings, see bindings/bindings.h. + ".bindings_lib.header": + os.path.join(inspector_protocol_dir, "bindings/bindings.h"), + ".bindings_lib.namespace": + "", + } + for key_value in config_values: + parts = key_value.split("=") + if len(parts) == 2: + defaults["." + parts[0]] = parts[1] + return (jinja_dir, config_file, + init_defaults(config_partial, "", defaults)) + except Exception: + # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html + exc = sys.exc_info()[1] + sys.stderr.write("Failed to parse config file: %s\n\n" % exc) + exit(1) # ---- Begin of utilities exposed to generator ---- def to_title_case(name): - return name[:1].upper() + name[1:] + return name[:1].upper() + name[1:] def dash_to_camelcase(word): - prefix = "" - if word[0] == "-": - prefix = "Negative" - word = word[1:] - return prefix + "".join(to_title_case(x) or "-" for x in word.split("-")) + prefix = "" + if word[0] == "-": + prefix = "Negative" + word = word[1:] + return prefix + "".join(to_title_case(x) or "-" for x in word.split("-")) def to_snake_case(name): - return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxsize).lower() + return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxsize).lower() def to_method_case(config, name): - if config.use_title_case_methods: - return to_title_case(name) - return name + if config.use_title_case_methods: + return to_title_case(name) + return name def join_arrays(dict, keys): - result = [] - for key 
in keys: - if key in dict: - result += dict[key] - return result + result = [] + for key in keys: + if key in dict: + result += dict[key] + return result def format_include(config, header, file_name=None): - if file_name is not None: - header = header + "/" + file_name + ".h" - header = "\"" + header + "\"" if header[0] not in "<\"" else header - if config.use_snake_file_names: - header = to_snake_case(header) - return header + if file_name is not None: + header = header + "/" + file_name + ".h" + header = '"' + header + '"' if header[0] not in '<"' else header + if config.use_snake_file_names: + header = to_snake_case(header) + return header def format_domain_include(config, header, file_name): - return format_include(config, header, - config.protocol.file_name_prefix + file_name) + return format_include(config, header, + config.protocol.file_name_prefix + file_name) def to_file_name(config, file_name): - if config.use_snake_file_names: - return to_snake_case(file_name).replace(".cpp", ".cc") - return file_name + if config.use_snake_file_names: + return to_snake_case(file_name).replace(".cpp", ".cc") + return file_name # ---- End of utilities exposed to generator ---- def initialize_jinja_env(jinja_dir, cache_dir, config): - # pylint: disable=F0401 - sys.path.insert(1, os.path.abspath(jinja_dir)) - import jinja2 - - jinja_env = jinja2.Environment( - loader=jinja2.FileSystemLoader(module_path), - # Bytecode cache is not concurrency-safe unless pre-cached: - # if pre-cached this is read-only, but writing creates a race condition. 
- bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir), - keep_trailing_newline=True, # newline-terminate generated files - lstrip_blocks=True, # so can indent control flow tags - trim_blocks=True) - jinja_env.filters.update({ - "to_title_case": to_title_case, - "dash_to_camelcase": dash_to_camelcase, - "to_method_case": functools.partial(to_method_case, config)}) - jinja_env.add_extension("jinja2.ext.loopcontrols") - return jinja_env + # pylint: disable=F0401 + sys.path.insert(1, os.path.abspath(jinja_dir)) + import jinja2 + + jinja_env = jinja2.Environment( + loader=jinja2.FileSystemLoader(module_path), + # Bytecode cache is not concurrency-safe unless pre-cached: + # if pre-cached this is read-only, but writing creates a race condition. + bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir), + keep_trailing_newline=True, # newline-terminate generated files + lstrip_blocks=True, # so can indent control flow tags + trim_blocks=True, + ) + jinja_env.filters.update({ + "to_title_case": + to_title_case, + "dash_to_camelcase": + dash_to_camelcase, + "to_method_case": + functools.partial(to_method_case, config), + }) + jinja_env.add_extension("jinja2.ext.loopcontrols") + return jinja_env def create_imported_type_definition(domain_name, type, imported_namespace): - # pylint: disable=W0622 - return { - "return_type": "std::unique_ptr<%s::%s::API::%s>" % ( - imported_namespace, domain_name, type["id"]), - "pass_type": "std::unique_ptr<%s::%s::API::%s>" % ( - imported_namespace, domain_name, type["id"]), - "to_raw_type": "%s.get()", - "to_pass_type": "std::move(%s)", - "to_rvalue": "std::move(%s)", - "type": "std::unique_ptr<%s::%s::API::%s>" % ( - imported_namespace, domain_name, type["id"]), - "raw_type": "%s::%s::API::%s" % ( - imported_namespace, domain_name, type["id"]), - "raw_pass_type": "%s::%s::API::%s*" % ( - imported_namespace, domain_name, type["id"]), - "raw_return_type": "%s::%s::API::%s*" % ( - imported_namespace, domain_name, type["id"]), - } + # 
pylint: disable=W0622 + return { + "return_type": + "std::unique_ptr<%s::%s::API::%s>" % + (imported_namespace, domain_name, type["id"]), + "pass_type": + "std::unique_ptr<%s::%s::API::%s>" % + (imported_namespace, domain_name, type["id"]), + "to_raw_type": + "%s.get()", + "to_pass_type": + "std::move(%s)", + "to_rvalue": + "std::move(%s)", + "type": + "std::unique_ptr<%s::%s::API::%s>" % + (imported_namespace, domain_name, type["id"]), + "raw_type": + "%s::%s::API::%s" % (imported_namespace, domain_name, type["id"]), + "raw_pass_type": + "%s::%s::API::%s*" % (imported_namespace, domain_name, type["id"]), + "raw_return_type": + "%s::%s::API::%s*" % (imported_namespace, domain_name, type["id"]), + } def create_user_type_definition(domain_name, type): - # pylint: disable=W0622 - return { - "return_type": "std::unique_ptr" % ( - domain_name, type["id"]), - "pass_type": "std::unique_ptr" % ( - domain_name, type["id"]), - "to_raw_type": "%s.get()", - "to_pass_type": "std::move(%s)", - "to_rvalue": "std::move(%s)", - "type": "std::unique_ptr" % (domain_name, type["id"]), - "raw_type": "protocol::%s::%s" % (domain_name, type["id"]), - "raw_pass_type": "protocol::%s::%s*" % (domain_name, type["id"]), - "raw_return_type": "protocol::%s::%s*" % (domain_name, type["id"]), - } + # pylint: disable=W0622 + return { + "return_type": + "std::unique_ptr" % (domain_name, type["id"]), + "pass_type": + "std::unique_ptr" % (domain_name, type["id"]), + "to_raw_type": + "%s.get()", + "to_pass_type": + "std::move(%s)", + "to_rvalue": + "std::move(%s)", + "type": + "std::unique_ptr" % (domain_name, type["id"]), + "raw_type": + "protocol::%s::%s" % (domain_name, type["id"]), + "raw_pass_type": + "protocol::%s::%s*" % (domain_name, type["id"]), + "raw_return_type": + "protocol::%s::%s*" % (domain_name, type["id"]), + } def create_object_type_definition(): - # pylint: disable=W0622 - return { - "return_type": "std::unique_ptr", - "pass_type": "std::unique_ptr", - "to_raw_type": "%s.get()", - 
"to_pass_type": "std::move(%s)", - "to_rvalue": "std::move(%s)", - "type": "std::unique_ptr", - "raw_type": "protocol::DictionaryValue", - "raw_pass_type": "protocol::DictionaryValue*", - "raw_return_type": "protocol::DictionaryValue*", - } + # pylint: disable=W0622 + return { + "return_type": "std::unique_ptr", + "pass_type": "std::unique_ptr", + "to_raw_type": "%s.get()", + "to_pass_type": "std::move(%s)", + "to_rvalue": "std::move(%s)", + "type": "std::unique_ptr", + "raw_type": "protocol::DictionaryValue", + "raw_pass_type": "protocol::DictionaryValue*", + "raw_return_type": "protocol::DictionaryValue*", + } def create_any_type_definition(): - # pylint: disable=W0622 - return { - "return_type": "std::unique_ptr", - "pass_type": "std::unique_ptr", - "to_raw_type": "%s.get()", - "to_pass_type": "std::move(%s)", - "to_rvalue": "std::move(%s)", - "type": "std::unique_ptr", - "raw_type": "protocol::Value", - "raw_pass_type": "protocol::Value*", - "raw_return_type": "protocol::Value*", - } + # pylint: disable=W0622 + return { + "return_type": "std::unique_ptr", + "pass_type": "std::unique_ptr", + "to_raw_type": "%s.get()", + "to_pass_type": "std::move(%s)", + "to_rvalue": "std::move(%s)", + "type": "std::unique_ptr", + "raw_type": "protocol::Value", + "raw_pass_type": "protocol::Value*", + "raw_return_type": "protocol::Value*", + } def create_string_type_definition(): - # pylint: disable=W0622 - return { - "return_type": "String", - "pass_type": "const String&", - "to_pass_type": "%s", - "to_raw_type": "%s", - "to_rvalue": "%s", - "type": "String", - "raw_type": "String", - "raw_pass_type": "const String&", - "raw_return_type": "String", - } + # pylint: disable=W0622 + return { + "return_type": "String", + "pass_type": "const String&", + "to_pass_type": "%s", + "to_raw_type": "%s", + "to_rvalue": "%s", + "type": "String", + "raw_type": "String", + "raw_pass_type": "const String&", + "raw_return_type": "String", + } def create_binary_type_definition(): - # pylint: 
disable=W0622 - return { - "return_type": "Binary", - "pass_type": "const Binary&", - "to_pass_type": "%s", - "to_raw_type": "%s", - "to_rvalue": "%s", - "type": "Binary", - "raw_type": "Binary", - "raw_pass_type": "const Binary&", - "raw_return_type": "Binary", - } + # pylint: disable=W0622 + return { + "return_type": "Binary", + "pass_type": "const Binary&", + "to_pass_type": "%s", + "to_raw_type": "%s", + "to_rvalue": "%s", + "type": "Binary", + "raw_type": "Binary", + "raw_pass_type": "const Binary&", + "raw_return_type": "Binary", + } def create_primitive_type_definition(type): - # pylint: disable=W0622 - typedefs = { - "number": "double", - "integer": "int", - "boolean": "bool" - } - defaults = { - "number": "0", - "integer": "0", - "boolean": "false" - } - jsontypes = { - "number": "TypeDouble", - "integer": "TypeInteger", - "boolean": "TypeBoolean", - } - return { - "return_type": typedefs[type], - "pass_type": typedefs[type], - "to_pass_type": "%s", - "to_raw_type": "%s", - "to_rvalue": "%s", - "type": typedefs[type], - "raw_type": typedefs[type], - "raw_pass_type": typedefs[type], - "raw_return_type": typedefs[type], - "default_value": defaults[type] - } + # pylint: disable=W0622 + typedefs = {"number": "double", "integer": "int", "boolean": "bool"} + defaults = {"number": "0", "integer": "0", "boolean": "false"} + jsontypes = { + "number": "TypeDouble", + "integer": "TypeInteger", + "boolean": "TypeBoolean", + } + return { + "return_type": typedefs[type], + "pass_type": typedefs[type], + "to_pass_type": "%s", + "to_raw_type": "%s", + "to_rvalue": "%s", + "type": typedefs[type], + "raw_type": typedefs[type], + "raw_pass_type": typedefs[type], + "raw_return_type": typedefs[type], + "default_value": defaults[type], + } def wrap_array_definition(type): - # pylint: disable=W0622 - return { - "return_type": "std::unique_ptr>" % type["raw_type"], - "pass_type": "std::unique_ptr>" % type["raw_type"], - "to_raw_type": "%s.get()", - "to_pass_type": 
"std::move(%s)", - "to_rvalue": "std::move(%s)", - "type": "std::unique_ptr>" % type["raw_type"], - "raw_type": "protocol::Array<%s>" % type["raw_type"], - "raw_pass_type": "protocol::Array<%s>*" % type["raw_type"], - "raw_return_type": "protocol::Array<%s>*" % type["raw_type"], - "out_type": "protocol::Array<%s>&" % type["raw_type"], - } + # pylint: disable=W0622 + return { + "return_type": + "std::unique_ptr>" % type["raw_type"], + "pass_type": "std::unique_ptr>" % type["raw_type"], + "to_raw_type": "%s.get()", + "to_pass_type": "std::move(%s)", + "to_rvalue": "std::move(%s)", + "type": "std::unique_ptr>" % type["raw_type"], + "raw_type": "protocol::Array<%s>" % type["raw_type"], + "raw_pass_type": "protocol::Array<%s>*" % type["raw_type"], + "raw_return_type": "protocol::Array<%s>*" % type["raw_type"], + "out_type": "protocol::Array<%s>&" % type["raw_type"], + } class Protocol(object): + def __init__(self, config): + self.config = config + self.json_api = {"domains": []} + self.imported_domains = [] + self.exported_domains = [] + self.generate_domains = self.read_protocol_file(config.protocol.path) + + if config.protocol.options: + self.generate_domains = [ + rule.domain for rule in config.protocol.options + ] + self.exported_domains = [ + rule.domain for rule in config.protocol.options + if hasattr(rule, "exported") + ] + + if config.imported: + self.imported_domains = self.read_protocol_file( + config.imported.path) + if config.imported.options: + self.imported_domains = [ + rule.domain for rule in config.imported.options + ] + + self.patch_full_qualified_refs() + self.create_notification_types() + self.create_type_definitions() + self.generate_used_types() + + def read_protocol_file(self, file_name): + input_file = open(file_name, "r") + parsed_json = pdl.loads(input_file.read(), file_name) + input_file.close() + version = "%s.%s" % ( + parsed_json["version"]["major"], + parsed_json["version"]["minor"], + ) + domains = [] + for domain in 
parsed_json["domains"]: + domains.append(domain["domain"]) + domain["version"] = version + self.json_api["domains"] += parsed_json["domains"] + return domains + + def patch_full_qualified_refs(self): + def patch_full_qualified_refs_in_domain(json, domain_name): + if isinstance(json, list): + for item in json: + patch_full_qualified_refs_in_domain(item, domain_name) + if not isinstance(json, dict): + return + for key in json: + if key == "type" and json[key] == "string": + json[key] = domain_name + ".string" + if key != "$ref": + patch_full_qualified_refs_in_domain(json[key], domain_name) + continue + if json["$ref"].find(".") == -1: + json["$ref"] = domain_name + "." + json["$ref"] + return + + for domain in self.json_api["domains"]: + patch_full_qualified_refs_in_domain(domain, domain["domain"]) + + def all_references(self, json): + refs = set() + if isinstance(json, list): + for item in json: + refs |= self.all_references(item) + if not isinstance(json, dict): + return refs + for key in json: + if key != "$ref": + refs |= self.all_references(json[key]) + else: + refs.add(json["$ref"]) + return refs + + def generate_used_types(self): + all_refs = set() + for domain in self.json_api["domains"]: + domain_name = domain["domain"] + if "commands" in domain: + for command in domain["commands"]: + if self.generate_command(domain_name, command["name"]): + all_refs |= self.all_references(command) + if "events" in domain: + for event in domain["events"]: + if self.generate_event(domain_name, event["name"]): + all_refs |= self.all_references(event) + all_refs.add( + "%s.%sNotification" % + (domain_name, to_title_case(event["name"]))) + + dependencies = self.generate_type_dependencies() + queue = set(all_refs) + while len(queue): + ref = queue.pop() + if ref in dependencies: + queue |= dependencies[ref] - all_refs + all_refs |= dependencies[ref] + self.used_types = all_refs + + def generate_type_dependencies(self): + dependencies = dict() + domains_with_types = (x for x in 
self.json_api["domains"] + if "types" in x) + for domain in domains_with_types: + domain_name = domain["domain"] + for type in domain["types"]: + related_types = self.all_references(type) + if len(related_types): + dependencies[domain_name + "." + + type["id"]] = related_types + return dependencies + + def create_notification_types(self): + for domain in self.json_api["domains"]: + if "events" in domain: + for event in domain["events"]: + event_type = dict() + event_type[ + "description"] = "Wrapper for notification params" + event_type["type"] = "object" + event_type["id"] = to_title_case( + event["name"]) + "Notification" + if "parameters" in event: + event_type["properties"] = copy.deepcopy( + event["parameters"]) + if "types" not in domain: + domain["types"] = list() + domain["types"].append(event_type) + + def create_type_definitions(self): + imported_namespace = "" + if self.config.imported: + imported_namespace = "::".join(self.config.imported.namespace) + self.type_definitions = {} + self.type_definitions["number"] = create_primitive_type_definition( + "number") + self.type_definitions["integer"] = create_primitive_type_definition( + "integer") + self.type_definitions["boolean"] = create_primitive_type_definition( + "boolean") + self.type_definitions["object"] = create_object_type_definition() + self.type_definitions["any"] = create_any_type_definition() + self.type_definitions["binary"] = create_binary_type_definition() + for domain in self.json_api["domains"]: + self.type_definitions[domain["domain"] + + ".string"] = create_string_type_definition() + self.type_definitions[domain["domain"] + + ".binary"] = create_binary_type_definition() + if not ("types" in domain): + continue + for type in domain["types"]: + type_name = domain["domain"] + "." 
+ type["id"] + if (type["type"] == "object" + and domain["domain"] in self.imported_domains): + self.type_definitions[ + type_name] = create_imported_type_definition( + domain["domain"], type, imported_namespace) + elif type["type"] == "object": + self.type_definitions[ + type_name] = create_user_type_definition( + domain["domain"], type) + elif type["type"] == "array": + self.type_definitions[type_name] = self.resolve_type(type) + elif type["type"] == domain["domain"] + ".string": + self.type_definitions[ + type_name] = create_string_type_definition() + elif type["type"] == domain["domain"] + ".binary": + self.type_definitions[ + type_name] = create_binary_type_definition() + else: + self.type_definitions[ + type_name] = create_primitive_type_definition( + type["type"]) + + def check_options(self, options, domain, name, include_attr, exclude_attr, + default): + for rule in options: + if rule.domain != domain: + continue + if include_attr and hasattr(rule, include_attr): + return name in getattr(rule, include_attr) + if exclude_attr and hasattr(rule, exclude_attr): + return name not in getattr(rule, exclude_attr) + return default + return False - def __init__(self, config): - self.config = config - self.json_api = {"domains": []} - self.imported_domains = [] - self.exported_domains = [] - self.generate_domains = self.read_protocol_file(config.protocol.path) - - if config.protocol.options: - self.generate_domains = [rule.domain for rule in config.protocol.options] - self.exported_domains = [rule.domain for rule in config.protocol.options - if hasattr(rule, "exported")] - - if config.imported: - self.imported_domains = self.read_protocol_file(config.imported.path) - if config.imported.options: - self.imported_domains = [rule.domain - for rule in config.imported.options] - - self.patch_full_qualified_refs() - self.create_notification_types() - self.create_type_definitions() - self.generate_used_types() - - def read_protocol_file(self, file_name): - input_file = 
open(file_name, "r") - parsed_json = pdl.loads(input_file.read(), file_name) - input_file.close() - version = '%s.%s' % (parsed_json["version"]["major"], - parsed_json["version"]["minor"]) - domains = [] - for domain in parsed_json["domains"]: - domains.append(domain["domain"]) - domain["version"] = version - self.json_api["domains"] += parsed_json["domains"] - return domains - - def patch_full_qualified_refs(self): - def patch_full_qualified_refs_in_domain(json, domain_name): - if isinstance(json, list): - for item in json: - patch_full_qualified_refs_in_domain(item, domain_name) - if not isinstance(json, dict): - return - for key in json: - if key == "type" and json[key] == "string": - json[key] = domain_name + ".string" - if key != "$ref": - patch_full_qualified_refs_in_domain(json[key], domain_name) - continue - if json["$ref"].find(".") == -1: - json["$ref"] = domain_name + "." + json["$ref"] - return - - for domain in self.json_api["domains"]: - patch_full_qualified_refs_in_domain(domain, domain["domain"]) - - def all_references(self, json): - refs = set() - if isinstance(json, list): - for item in json: - refs |= self.all_references(item) - if not isinstance(json, dict): - return refs - for key in json: - if key != "$ref": - refs |= self.all_references(json[key]) - else: - refs.add(json["$ref"]) - return refs - - def generate_used_types(self): - all_refs = set() - for domain in self.json_api["domains"]: - domain_name = domain["domain"] - if "commands" in domain: + # ---- Begin of methods exposed to generator + + def type_definition(self, name): + return self.type_definitions[name] + + def resolve_type(self, prop): + if "$ref" in prop: + return self.type_definitions[prop["$ref"]] + if prop["type"] == "array": + return wrap_array_definition(self.resolve_type(prop["items"])) + return self.type_definitions[prop["type"]] + + def generate_command(self, domain, command): + if not self.config.protocol.options: + return domain in self.generate_domains + return 
self.check_options(self.config.protocol.options, domain, + command, "include", "exclude", True) + + def generate_event(self, domain, event): + if not self.config.protocol.options: + return domain in self.generate_domains + return self.check_options( + self.config.protocol.options, + domain, + event, + "include_events", + "exclude_events", + True, + ) + + def generate_type(self, domain, typename): + return domain + "." + typename in self.used_types + + def is_async_command(self, domain, command): + if not self.config.protocol.options: + return False + return self.check_options(self.config.protocol.options, domain, + command, "async_", None, False) + + def is_exported(self, domain, name): + if not self.config.protocol.options: + return False + return self.check_options(self.config.protocol.options, domain, name, + "exported", None, False) + + def is_imported(self, domain, name): + if not self.config.imported: + return False + if not self.config.imported.options: + return domain in self.imported_domains + return self.check_options(self.config.imported.options, domain, name, + "imported", None, False) + + def is_exported_domain(self, domain): + return domain in self.exported_domains + + def generate_disable(self, domain): + if "commands" not in domain: + return True for command in domain["commands"]: - if self.generate_command(domain_name, command["name"]): - all_refs |= self.all_references(command) - if "events" in domain: - for event in domain["events"]: - if self.generate_event(domain_name, event["name"]): - all_refs |= self.all_references(event) - all_refs.add('%s.%sNotification' % (domain_name, - to_title_case(event["name"]))) - - dependencies = self.generate_type_dependencies() - queue = set(all_refs) - while len(queue): - ref = queue.pop() - if ref in dependencies: - queue |= dependencies[ref] - all_refs - all_refs |= dependencies[ref] - self.used_types = all_refs - - def generate_type_dependencies(self): - dependencies = dict() - domains_with_types = (x for x 
in self.json_api["domains"] if "types" in x) - for domain in domains_with_types: - domain_name = domain["domain"] - for type in domain["types"]: - related_types = self.all_references(type) - if len(related_types): - dependencies[domain_name + "." + type["id"]] = related_types - return dependencies - - def create_notification_types(self): - for domain in self.json_api["domains"]: - if "events" in domain: - for event in domain["events"]: - event_type = dict() - event_type["description"] = "Wrapper for notification params" - event_type["type"] = "object" - event_type["id"] = to_title_case(event["name"]) + "Notification" - if "parameters" in event: - event_type["properties"] = copy.deepcopy(event["parameters"]) - if "types" not in domain: - domain["types"] = list() - domain["types"].append(event_type) - - def create_type_definitions(self): - imported_namespace = "" - if self.config.imported: - imported_namespace = "::".join(self.config.imported.namespace) - self.type_definitions = {} - self.type_definitions["number"] = create_primitive_type_definition("number") - self.type_definitions["integer"] = create_primitive_type_definition("integer") - self.type_definitions["boolean"] = create_primitive_type_definition("boolean") - self.type_definitions["object"] = create_object_type_definition() - self.type_definitions["any"] = create_any_type_definition() - self.type_definitions["binary"] = create_binary_type_definition() - for domain in self.json_api["domains"]: - self.type_definitions[domain["domain"] + ".string"] = ( - create_string_type_definition()) - self.type_definitions[domain["domain"] + ".binary"] = ( - create_binary_type_definition()) - if not ("types" in domain): - continue - for type in domain["types"]: - type_name = domain["domain"] + "." 
+ type["id"] - if type["type"] == "object" and domain["domain"] in self.imported_domains: - self.type_definitions[type_name] = create_imported_type_definition( - domain["domain"], type, imported_namespace) - elif type["type"] == "object": - self.type_definitions[type_name] = create_user_type_definition( - domain["domain"], type) - elif type["type"] == "array": - self.type_definitions[type_name] = self.resolve_type(type) - elif type["type"] == domain["domain"] + ".string": - self.type_definitions[type_name] = create_string_type_definition() - elif type["type"] == domain["domain"] + ".binary": - self.type_definitions[type_name] = create_binary_type_definition() - else: - self.type_definitions[type_name] = create_primitive_type_definition( - type["type"]) - - def check_options(self, options, domain, name, include_attr, exclude_attr, - default): - for rule in options: - if rule.domain != domain: - continue - if include_attr and hasattr(rule, include_attr): - return name in getattr(rule, include_attr) - if exclude_attr and hasattr(rule, exclude_attr): - return name not in getattr(rule, exclude_attr) - return default - return False - - - # ---- Begin of methods exposed to generator - - def type_definition(self, name): - return self.type_definitions[name] - - def resolve_type(self, prop): - if "$ref" in prop: - return self.type_definitions[prop["$ref"]] - if prop["type"] == "array": - return wrap_array_definition(self.resolve_type(prop["items"])) - return self.type_definitions[prop["type"]] - - def generate_command(self, domain, command): - if not self.config.protocol.options: - return domain in self.generate_domains - return self.check_options(self.config.protocol.options, domain, command, - "include", "exclude", True) - - def generate_event(self, domain, event): - if not self.config.protocol.options: - return domain in self.generate_domains - return self.check_options(self.config.protocol.options, domain, event, - "include_events", "exclude_events", True) - - def 
generate_type(self, domain, typename): - return domain + "." + typename in self.used_types - - def is_async_command(self, domain, command): - if not self.config.protocol.options: - return False - return self.check_options(self.config.protocol.options, domain, command, - "async_", None, False) - - def is_exported(self, domain, name): - if not self.config.protocol.options: - return False - return self.check_options(self.config.protocol.options, domain, name, - "exported", None, False) - - def is_imported(self, domain, name): - if not self.config.imported: - return False - if not self.config.imported.options: - return domain in self.imported_domains - return self.check_options(self.config.imported.options, domain, name, - "imported", None, False) - - def is_exported_domain(self, domain): - return domain in self.exported_domains - - def generate_disable(self, domain): - if "commands" not in domain: - return True - for command in domain["commands"]: - if command["name"] == "disable" and self.generate_command( - domain["domain"], "disable"): - return False - return True + if command["name"] == "disable" and self.generate_command( + domain["domain"], "disable"): + return False + return True - def is_imported_dependency(self, domain): - return domain in self.generate_domains or domain in self.imported_domains + def is_imported_dependency(self, domain): + return domain in self.generate_domains or domain in self.imported_domains def main(): - jinja_dir, config_file, config = read_config() - - protocol = Protocol(config) - - if not config.exported and len(protocol.exported_domains): - sys.stderr.write(("Domains [%s] are exported, but config is missing export " - "entry\n\n") % ", ".join(protocol.exported_domains)) - exit(1) - - if not os.path.exists(config.protocol.output): - os.mkdir(config.protocol.output) - if len(protocol.exported_domains) and not os.path.exists( - config.exported.output): - os.mkdir(config.exported.output) - jinja_env = initialize_jinja_env(jinja_dir, 
config.protocol.output, config) - - inputs = [] - inputs.append(__file__) - inputs.append(config_file) - inputs.append(config.protocol.path) - if config.imported: - inputs.append(config.imported.path) - templates_dir = os.path.join(module_path, "templates") - inputs.append(os.path.join(templates_dir, "TypeBuilder_h.template")) - inputs.append(os.path.join(templates_dir, "TypeBuilder_cpp.template")) - inputs.append(os.path.join(templates_dir, "Exported_h.template")) - inputs.append(os.path.join(templates_dir, "Imported_h.template")) - - h_template = jinja_env.get_template("templates/TypeBuilder_h.template") - cpp_template = jinja_env.get_template("templates/TypeBuilder_cpp.template") - exported_template = jinja_env.get_template("templates/Exported_h.template") - imported_template = jinja_env.get_template("templates/Imported_h.template") - - outputs = dict() - - for domain in protocol.json_api["domains"]: - class_name = domain["domain"] - file_name = config.protocol.file_name_prefix + class_name - template_context = { - "protocol": protocol, - "config": config, - "domain": domain, - "join_arrays": join_arrays, - "format_include": functools.partial(format_include, config), - "format_domain_include": functools.partial(format_domain_include, config), - } - - if domain["domain"] in protocol.generate_domains: - outputs[os.path.join(config.protocol.output, to_file_name( - config, file_name + ".h"))] = h_template.render(template_context) - outputs[os.path.join(config.protocol.output, to_file_name( - config, file_name + ".cpp"))] = cpp_template.render(template_context) - if domain["domain"] in protocol.exported_domains: - outputs[os.path.join(config.exported.output, to_file_name( - config, file_name + ".h"))] = exported_template.render( - template_context) - if domain["domain"] in protocol.imported_domains: - outputs[os.path.join(config.protocol.output, to_file_name( - config, file_name + ".h"))] = imported_template.render( - template_context) - - if config.lib: - 
template_context = { - "config": config, - "format_include": functools.partial(format_include, config), - } - - lib_templates_dir = os.path.join(module_path, "lib") - # Note these should be sorted in the right order. - # TODO(dgozman): sort them programmatically based on commented includes. - protocol_h_templates = [ - "ErrorSupport_h.template", - "Values_h.template", - "Object_h.template", - "ValueConversions_h.template", - "DispatcherBase_h.template", - "Parser_h.template", - ] - - protocol_cpp_templates = [ - "Protocol_cpp.template", - "ErrorSupport_cpp.template", - "Values_cpp.template", - "Object_cpp.template", - "DispatcherBase_cpp.template", - "Parser_cpp.template", - ] - - forward_h_templates = [ - "Forward_h.template", - "FrontendChannel_h.template", - ] - - base_string_adapter_h_templates = [ - "base_string_adapter_h.template", - ] - - base_string_adapter_cc_templates = [ - "base_string_adapter_cc.template", - ] - - def generate_lib_file(file_name, template_files): - parts = [] - for template_file in template_files: - inputs.append(os.path.join(lib_templates_dir, template_file)) - template = jinja_env.get_template("lib/" + template_file) - parts.append(template.render(template_context)) - outputs[file_name] = "\n\n".join(parts) - - generate_lib_file(os.path.join(config.lib.output, to_file_name( - config, "Forward.h")), forward_h_templates) - generate_lib_file(os.path.join(config.lib.output, to_file_name( - config, "Protocol.h")), protocol_h_templates) - generate_lib_file(os.path.join(config.lib.output, to_file_name( - config, "Protocol.cpp")), protocol_cpp_templates) - generate_lib_file(os.path.join(config.lib.output, to_file_name( - config, "base_string_adapter.h")), base_string_adapter_h_templates) - generate_lib_file(os.path.join(config.lib.output, to_file_name( - config, "base_string_adapter.cc")), base_string_adapter_cc_templates) - - # Make gyp / make generatos happy, otherwise make rebuilds world. 
- inputs_ts = max(map(os.path.getmtime, inputs)) - up_to_date = True - for output_file in outputs.keys(): - if (not os.path.exists(output_file) - or os.path.getmtime(output_file) < inputs_ts): - up_to_date = False - break - if up_to_date: - sys.exit() - - for file_name, content in outputs.items(): - out_file = open(file_name, "w") - out_file.write(content) - out_file.close() + jinja_dir, config_file, config = read_config() + + protocol = Protocol(config) + + if not config.exported and len(protocol.exported_domains): + sys.stderr.write( + ("Domains [%s] are exported, but config is missing export " + "entry\n\n") % ", ".join(protocol.exported_domains)) + exit(1) + + if not os.path.exists(config.protocol.output): + os.mkdir(config.protocol.output) + if len(protocol.exported_domains) and not os.path.exists( + config.exported.output): + os.mkdir(config.exported.output) + jinja_env = initialize_jinja_env(jinja_dir, config.protocol.output, config) + + inputs = [] + inputs.append(__file__) + inputs.append(config_file) + inputs.append(config.protocol.path) + if config.imported: + inputs.append(config.imported.path) + templates_dir = os.path.join(module_path, "templates") + inputs.append(os.path.join(templates_dir, "TypeBuilder_h.template")) + inputs.append(os.path.join(templates_dir, "TypeBuilder_cpp.template")) + inputs.append(os.path.join(templates_dir, "Exported_h.template")) + inputs.append(os.path.join(templates_dir, "Imported_h.template")) + + h_template = jinja_env.get_template("templates/TypeBuilder_h.template") + cpp_template = jinja_env.get_template("templates/TypeBuilder_cpp.template") + exported_template = jinja_env.get_template("templates/Exported_h.template") + imported_template = jinja_env.get_template("templates/Imported_h.template") + + outputs = dict() + + for domain in protocol.json_api["domains"]: + class_name = domain["domain"] + file_name = config.protocol.file_name_prefix + class_name + template_context = { + "protocol": + protocol, + "config": + 
config, + "domain": + domain, + "join_arrays": + join_arrays, + "format_include": + functools.partial(format_include, config), + "format_domain_include": + functools.partial(format_domain_include, config), + } + + if domain["domain"] in protocol.generate_domains: + outputs[os.path.join( + config.protocol.output, + to_file_name(config, file_name + + ".h"))] = h_template.render(template_context) + outputs[os.path.join( + config.protocol.output, + to_file_name(config, file_name + + ".cpp"))] = cpp_template.render(template_context) + if domain["domain"] in protocol.exported_domains: + outputs[os.path.join( + config.exported.output, + to_file_name( + config, file_name + + ".h"))] = exported_template.render(template_context) + if domain["domain"] in protocol.imported_domains: + outputs[os.path.join( + config.protocol.output, to_file_name( + config, file_name + + ".h"))] = imported_template.render(template_context) + + if config.lib: + template_context = { + "config": config, + "format_include": functools.partial(format_include, config), + } + + lib_templates_dir = os.path.join(module_path, "lib") + # Note these should be sorted in the right order. + # TODO(dgozman): sort them programmatically based on commented includes. 
+ protocol_h_templates = [ + "ErrorSupport_h.template", + "Values_h.template", + "Object_h.template", + "ValueConversions_h.template", + "DispatcherBase_h.template", + "Parser_h.template", + ] + + protocol_cpp_templates = [ + "Protocol_cpp.template", + "ErrorSupport_cpp.template", + "Values_cpp.template", + "Object_cpp.template", + "DispatcherBase_cpp.template", + "Parser_cpp.template", + ] + + forward_h_templates = [ + "Forward_h.template", "FrontendChannel_h.template" + ] + + base_string_adapter_h_templates = ["base_string_adapter_h.template"] + + base_string_adapter_cc_templates = ["base_string_adapter_cc.template"] + + def generate_lib_file(file_name, template_files): + parts = [] + for template_file in template_files: + inputs.append(os.path.join(lib_templates_dir, template_file)) + template = jinja_env.get_template("lib/" + template_file) + parts.append(template.render(template_context)) + outputs[file_name] = "\n\n".join(parts) + + generate_lib_file( + os.path.join(config.lib.output, to_file_name(config, "Forward.h")), + forward_h_templates, + ) + generate_lib_file( + os.path.join(config.lib.output, to_file_name(config, + "Protocol.h")), + protocol_h_templates, + ) + generate_lib_file( + os.path.join(config.lib.output, + to_file_name(config, "Protocol.cpp")), + protocol_cpp_templates, + ) + generate_lib_file( + os.path.join(config.lib.output, + to_file_name(config, "base_string_adapter.h")), + base_string_adapter_h_templates, + ) + generate_lib_file( + os.path.join(config.lib.output, + to_file_name(config, "base_string_adapter.cc")), + base_string_adapter_cc_templates, + ) + + # Make gyp / make generatos happy, otherwise make rebuilds world. 
+ inputs_ts = max(map(os.path.getmtime, inputs)) + up_to_date = True + for output_file in outputs.keys(): + if not os.path.exists( + output_file) or os.path.getmtime(output_file) < inputs_ts: + up_to_date = False + break + if up_to_date: + sys.exit() + + for file_name, content in outputs.items(): + out_file = open(file_name, "w") + out_file.write(content) + out_file.close() if __name__ == "__main__": - main() + main() diff --git a/doc/api/cli.md b/doc/api/cli.md index 31e554024285ff..34ce683b4d67c7 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -21,6 +21,7 @@ Execute without arguments to start the [REPL][]. _For more info about `node inspect`, please see the [debugger][] documentation._ ## Options + -All options, including V8 options, allow words to be separated by both -dashes (`-`) or underscores (`_`). +All options, including V8 options, allow words to be separated by both dashes +(`-`) or underscores (`_`). For example, `--pending-deprecation` is equivalent to `--pending_deprecation`. ### `-` + Alias for stdin, analogous to the use of - in other command line utilities, -meaning that the script will be read from stdin, and the rest of the options -are passed to that script. +meaning that the script will be read from stdin, and the rest of the options are +passed to that script. ### `--` + Indicate the end of node options. Pass the rest of the arguments to the script. -If no script filename or eval/print script is supplied prior to this, then -the next argument will be used as a script filename. +If no script filename or eval/print script is supplied prior to this, then the +next argument will be used as a script filename. ### `--abort-on-uncaught-exception` + @@ -65,6 +69,7 @@ If this flag is passed, the behavior can still be set to not abort through `domain` module that uses it). 
### `--completion-bash` + @@ -77,6 +82,7 @@ $ source node_bash_completion ``` ### `--cpu-prof` + @@ -86,11 +92,11 @@ added: v12.0.0 Starts the V8 CPU profiler on start up, and writes the CPU profile to disk before exit. -If `--cpu-prof-dir` is not specified, the generated profile will be placed -in the current working directory. +If `--cpu-prof-dir` is not specified, the generated profile will be placed in +the current working directory. -If `--cpu-prof-name` is not specified, the generated profile will be -named `CPU.${yyyymmdd}.${hhmmss}.${pid}.${tid}.${seq}.cpuprofile`. +If `--cpu-prof-name` is not specified, the generated profile will be named +`CPU.${yyyymmdd}.${hhmmss}.${pid}.${tid}.${seq}.cpuprofile`. ```console $ node --cpu-prof index.js @@ -99,26 +105,29 @@ CPU.20190409.202950.15293.0.0.cpuprofile ``` ### `--cpu-prof-dir` + > Stability: 1 - Experimental -Specify the directory where the CPU profiles generated by `--cpu-prof` will -be placed. +Specify the directory where the CPU profiles generated by `--cpu-prof` will be +placed. ### `--cpu-prof-interval` + > Stability: 1 - Experimental -Specify the sampling interval in microseconds for the CPU profiles generated -by `--cpu-prof`. The default is 1000 microseconds. +Specify the sampling interval in microseconds for the CPU profiles generated by +`--cpu-prof`. The default is 1000 microseconds. ### `--cpu-prof-name` + @@ -128,6 +137,7 @@ added: v12.0.0 Specify the file name of the CPU profile generated by `--cpu-prof`. ### `--enable-fips` + @@ -136,6 +146,7 @@ Enable FIPS-compliant crypto at startup. (Requires Node.js to be built with `./configure --openssl-fips`.) ### `--es-module-specifier-resolution=mode` + @@ -143,13 +154,14 @@ added: v12.0.0 To be used in conjunction with `--experimental-modules`. Sets the resolution algorithm for resolving specifiers. Valid options are `explicit` and `node`. -The default is `explicit`, which requires providing the full path to a -module. 
The `node` mode will enable support for optional file extensions and -the ability to import a directory that has an index file. +The default is `explicit`, which requires providing the full path to a module. +The `node` mode will enable support for optional file extensions and the ability +to import a directory that has an index file. Please see [customizing esm specifier resolution][] for example usage. ### `--experimental-exports` + @@ -157,6 +169,7 @@ added: v12.7.0 Enable experimental resolution using the `exports` field in `package.json`. ### `--experimental-modules` + @@ -164,6 +177,7 @@ added: v8.5.0 Enable experimental ES module support and caching modules. ### `--experimental-policy` + @@ -171,6 +185,7 @@ added: v11.8.0 Use the specified file as a security policy. ### `--experimental-repl-await` + @@ -178,6 +193,7 @@ added: v10.0.0 Enable experimental top-level `await` keyword support in REPL. ### `--experimental-report` + @@ -185,6 +201,7 @@ added: v11.8.0 Enable experimental diagnostic report feature. ### `--experimental-vm-modules` + @@ -192,6 +209,7 @@ added: v9.6.0 Enable experimental ES Module support in the `vm` module. ### `--experimental-wasm-modules` + @@ -199,6 +217,7 @@ added: v12.3.0 Enable experimental WebAssembly module support. ### `--force-fips` + @@ -207,6 +226,7 @@ Force FIPS-compliant crypto on startup. (Cannot be disabled from script code.) (Same requirements as `--enable-fips`.) ### `--frozen-intrinsics` + @@ -219,10 +239,11 @@ Support is currently only provided for the root context and no guarantees are currently provided that `global.Array` is indeed the default intrinsic reference. Code may break under this flag. -`--require` runs prior to freezing intrinsics in order to allow polyfills to -be added. +`--require` runs prior to freezing intrinsics in order to allow polyfills to be +added. 
### `--heapsnapshot-signal=signal` + @@ -241,6 +262,7 @@ Heap.20190718.133405.15554.0.001.heapsnapshot ``` ### `--heap-prof` + @@ -250,11 +272,11 @@ added: v12.4.0 Starts the V8 heap profiler on start up, and writes the heap profile to disk before exit. -If `--heap-prof-dir` is not specified, the generated profile will be placed -in the current working directory. +If `--heap-prof-dir` is not specified, the generated profile will be placed in +the current working directory. -If `--heap-prof-name` is not specified, the generated profile will be -named `Heap.${yyyymmdd}.${hhmmss}.${pid}.${tid}.${seq}.heapprofile`. +If `--heap-prof-name` is not specified, the generated profile will be named +`Heap.${yyyymmdd}.${hhmmss}.${pid}.${tid}.${seq}.heapprofile`. ```console $ node --heap-prof index.js @@ -263,16 +285,18 @@ Heap.20190409.202950.15293.0.001.heapprofile ``` ### `--heap-prof-dir` + > Stability: 1 - Experimental -Specify the directory where the heap profiles generated by `--heap-prof` will -be placed. +Specify the directory where the heap profiles generated by `--heap-prof` will be +placed. ### `--heap-prof-interval` + @@ -280,9 +304,10 @@ added: v12.4.0 > Stability: 1 - Experimental Specify the average sampling interval in bytes for the heap profiles generated -by `--heap-prof`. The default is 512 * 1024 bytes. +by `--heap-prof`. The default is 512 \* 1024 bytes. ### `--heap-prof-name` + @@ -295,6 +320,7 @@ Generates a heap snapshot each time the process receives the specified signal. `signal` must be a valid signal name. Disabled by default. ### `--icu-data-dir=file` + @@ -302,6 +328,7 @@ added: v0.11.15 Specify ICU data load path. (Overrides `NODE_ICU_DATA`.) ### `--input-type=type` + @@ -313,20 +340,22 @@ input as CommonJS or as an ES module. String input is input via `--eval`, Valid values are `"commonjs"` and `"module"`. The default is `"commonjs"`. ### `--inspect-brk[=[host:]port]` + -Activate inspector on `host:port` and break at start of user script. 
-Default `host:port` is `127.0.0.1:9229`. +Activate inspector on `host:port` and break at start of user script. Default +`host:port` is `127.0.0.1:9229`. ### `--inspect-port=[host:]port` + -Set the `host:port` to be used when the inspector is activated. -Useful when activating the inspector by sending the `SIGUSR1` signal. +Set the `host:port` to be used when the inspector is activated. Useful when +activating the inspector by sending the `SIGUSR1` signal. Default host is `127.0.0.1`. @@ -334,6 +363,7 @@ See the [security warning](#inspector_security) below regarding the `host` parameter usage. ### `--inspect[=[host:]port]` + @@ -341,20 +371,21 @@ added: v6.3.0 Activate inspector on `host:port`. Default is `127.0.0.1:9229`. V8 inspector integration allows tools such as Chrome DevTools and IDEs to debug -and profile Node.js instances. The tools attach to Node.js instances via a -tcp port and communicate using the [Chrome DevTools Protocol][]. +and profile Node.js instances. The tools attach to Node.js instances via a tcp +port and communicate using the [Chrome DevTools Protocol][]. + #### Warning: binding inspector to a public IP:port combination is insecure Binding the inspector to a public IP (including `0.0.0.0`) with an open port is -insecure, as it allows external hosts to connect to the inspector and perform -a [remote code execution][] attack. +insecure, as it allows external hosts to connect to the inspector and perform a +[remote code execution][] attack. If specifying a host, make sure that either: -* The host is not accessible from public networks. -* A firewall disallows unwanted connections on the port. +- The host is not accessible from public networks. +- A firewall disallows unwanted connections on the port. 
**More specifically, `--inspect=0.0.0.0` is insecure if the port (`9229` by default) is not firewall-protected.** @@ -369,6 +400,7 @@ By default inspector websocket url is available in stderr and under `/json/list` endpoint on `http://host:port/json/list`. ### `--loader=file` + @@ -376,6 +408,7 @@ added: v9.0.0 Specify the `file` of the custom [experimental ECMAScript Module][] loader. ### `--max-http-header-size=size` + @@ -383,6 +416,7 @@ added: v11.6.0 Specify the maximum size, in bytes, of HTTP headers. Defaults to 8KB. ### `--napi-modules` + @@ -390,6 +424,7 @@ added: v7.10.0 This option is a no-op. It is kept for compatibility. ### `--no-deprecation` + @@ -397,6 +432,7 @@ added: v0.8.0 Silence deprecation warnings. ### `--no-force-async-hooks-checks` + @@ -405,6 +441,7 @@ Disables runtime checks for `async_hooks`. These will still be enabled dynamically when `async_hooks` is enabled. ### `--no-warnings` + @@ -412,6 +449,7 @@ added: v6.0.0 Silence all process warnings (including deprecations). ### `--openssl-config=file` + @@ -421,6 +459,7 @@ used to enable FIPS-compliant crypto if Node.js is built with `./configure --openssl-fips`. ### `--pending-deprecation` + @@ -428,13 +467,14 @@ added: v8.0.0 Emit pending deprecation warnings. Pending deprecations are generally identical to a runtime deprecation with the -notable exception that they are turned *off* by default and will not be emitted +notable exception that they are turned _off_ by default and will not be emitted unless either the `--pending-deprecation` command line flag, or the `NODE_PENDING_DEPRECATION=1` environment variable, is set. Pending deprecations are used to provide a kind of selective "early warning" mechanism that developers may leverage to detect deprecated API usage. ### `--policy-integrity=sri` + @@ -446,6 +486,7 @@ the specified integrity. It expects a [Subresource Integrity][] string as a parameter. ### `--preserve-symlinks` + @@ -456,10 +497,10 @@ caching modules. 
By default, when Node.js loads a module from a path that is symbolically linked to a different on-disk location, Node.js will dereference the link and use the actual on-disk "real path" of the module as both an identifier and as a root -path to locate other dependency modules. In most cases, this default behavior -is acceptable. However, when using symbolically linked peer dependencies, as -illustrated in the example below, the default behavior causes an exception to -be thrown if `moduleA` attempts to require `moduleB` as a peer dependency: +path to locate other dependency modules. In most cases, this default behavior is +acceptable. However, when using symbolically linked peer dependencies, as +illustrated in the example below, the default behavior causes an exception to be +thrown if `moduleA` attempts to require `moduleB` as a peer dependency: ```text {appDir} @@ -475,14 +516,14 @@ be thrown if `moduleA` attempts to require `moduleB` as a peer dependency: └── package.json ``` -The `--preserve-symlinks` command line flag instructs Node.js to use the -symlink path for modules as opposed to the real path, allowing symbolically -linked peer dependencies to be found. +The `--preserve-symlinks` command line flag instructs Node.js to use the symlink +path for modules as opposed to the real path, allowing symbolically linked peer +dependencies to be found. Note, however, that using `--preserve-symlinks` can have other side effects. -Specifically, symbolically linked *native* modules can fail to load if those -are linked from more than one location in the dependency tree (Node.js would -see those as two separate modules and would attempt to load the module multiple +Specifically, symbolically linked _native_ modules can fail to load if those are +linked from more than one location in the dependency tree (Node.js would see +those as two separate modules and would attempt to load the module multiple times, causing an exception to be thrown). 
The `--preserve-symlinks` flag does not apply to the main module, which allows @@ -490,6 +531,7 @@ The `--preserve-symlinks` flag does not apply to the main module, which allows behavior for the main module, also use `--preserve-symlinks-main`. ### `--preserve-symlinks-main` + @@ -501,14 +543,15 @@ This flag exists so that the main module can be opted-in to the same behavior that `--preserve-symlinks` gives to all other imports; they are separate flags, however, for backward compatibility with older Node.js versions. -`--preserve-symlinks-main` does not imply `--preserve-symlinks`; it -is expected that `--preserve-symlinks-main` will be used in addition to +`--preserve-symlinks-main` does not imply `--preserve-symlinks`; it is expected +that `--preserve-symlinks-main` will be used in addition to `--preserve-symlinks` when it is not desirable to follow symlinks before resolving relative paths. See `--preserve-symlinks` for more information. ### `--prof` + @@ -516,6 +559,7 @@ added: v2.0.0 Generate V8 profiler output. ### `--prof-process` + @@ -523,16 +567,18 @@ added: v5.2.0 Process V8 profiler output generated using the V8 option `--prof`. ### `--redirect-warnings=file` + -Write process warnings to the given file instead of printing to stderr. The -file will be created if it does not exist, and will be appended to if it does. -If an error occurs while attempting to write the warning to the file, the -warning will be written to stderr instead. +Write process warnings to the given file instead of printing to stderr. The file +will be created if it does not exist, and will be appended to if it does. If an +error occurs while attempting to write the warning to the file, the warning will +be written to stderr instead. 
### `--report-directory=directory` + -Enables the report to be triggered on fatal errors (internal errors within -the Node.js runtime such as out of memory) that lead to termination of the +Enables the report to be triggered on fatal errors (internal errors within the +Node.js runtime such as out of memory) that lead to termination of the application, if `--experimental-report` is enabled. Useful to inspect various diagnostic data elements such as heap, stack, event loop state, resource consumption etc. to reason about the fatal error. ### `--report-on-signal` + @@ -621,6 +673,7 @@ added: v0.11.14 Throw errors for deprecations. ### `--title=title` + @@ -628,6 +681,7 @@ added: v10.7.0 Set `process.title` on startup. ### `--tls-cipher-list=list` + @@ -636,6 +690,7 @@ Specify an alternative default TLS cipher list. Requires Node.js to be built with crypto support (default). ### `--tls-max-v1.2` + @@ -644,6 +699,7 @@ Set [`tls.DEFAULT_MAX_VERSION`][] to 'TLSv1.2'. Use to disable support for TLSv1.3. ### `--tls-max-v1.3` + @@ -652,6 +708,7 @@ Set default [`tls.DEFAULT_MAX_VERSION`][] to 'TLSv1.3'. Use to enable support for TLSv1.3. ### `--tls-min-v1.0` + @@ -660,6 +717,7 @@ Set default [`tls.DEFAULT_MIN_VERSION`][] to 'TLSv1'. Use for compatibility with old TLS clients or servers. ### `--tls-min-v1.1` + @@ -668,6 +726,7 @@ Set default [`tls.DEFAULT_MIN_VERSION`][] to 'TLSv1.1'. Use for compatibility with old TLS clients or servers. ### `--tls-min-v1.2` + @@ -677,6 +736,7 @@ Set default [`tls.DEFAULT_MIN_VERSION`][] to 'TLSv1.2'. This is the default for versions. ### `--tls-min-v1.3` + @@ -685,6 +745,7 @@ Set default [`tls.DEFAULT_MIN_VERSION`][] to 'TLSv1.3'. Use to disable support for TLSv1.2, which is not as secure as TLSv1.3. ### `--trace-deprecation` + @@ -692,6 +753,7 @@ added: v0.8.0 Print stack traces for deprecations. 
### `--trace-event-categories` + @@ -700,14 +762,16 @@ A comma separated list of categories that should be traced when trace event tracing is enabled using `--trace-events-enabled`. ### `--trace-event-file-pattern` + -Template string specifying the filepath for the trace event data, it -supports `${rotation}` and `${pid}`. +Template string specifying the filepath for the trace event data, it supports +`${rotation}` and `${pid}`. ### `--trace-events-enabled` + @@ -715,6 +779,7 @@ added: v7.7.0 Enables the collection of trace event tracing information. ### `--trace-sync-io` + @@ -723,6 +788,7 @@ Prints a stack trace whenever synchronous I/O is detected after the first turn of the event loop. ### `--trace-tls` + @@ -731,6 +797,7 @@ Prints TLS packet trace information to `stderr`. This can be used to debug TLS connection problems. ### `--trace-warnings` + @@ -738,6 +805,7 @@ added: v6.0.0 Print stack traces for process warnings (including deprecations). ### `--track-heap-objects` + @@ -745,6 +813,7 @@ added: v2.4.0 Track heap object allocations for heap snapshots. ### `--unhandled-rejections=mode` + @@ -756,19 +825,19 @@ is used. Using this flag allows to change what should happen when an unhandled rejection occurs. One of three modes can be chosen: -* `strict`: Raise the unhandled rejection as an uncaught exception. -* `warn`: Always trigger a warning, no matter if the [`unhandledRejection`][] +- `strict`: Raise the unhandled rejection as an uncaught exception. +- `warn`: Always trigger a warning, no matter if the [`unhandledRejection`][] hook is set or not but do not print the deprecation warning. -* `none`: Silence all warnings. +- `none`: Silence all warnings. ### `--use-bundled-ca`, `--use-openssl-ca` + -Use bundled Mozilla CA store as supplied by current Node.js version -or use OpenSSL's default CA store. The default store is selectable -at build-time. +Use bundled Mozilla CA store as supplied by current Node.js version or use +OpenSSL's default CA store. 
The default store is selectable at build-time. The bundled CA store, as supplied by Node.js, is a snapshot of Mozilla CA store that is fixed at release time. It is identical on all supported platforms. @@ -782,6 +851,7 @@ environment variables. See `SSL_CERT_DIR` and `SSL_CERT_FILE`. ### `--v8-options` + @@ -789,6 +859,7 @@ added: v0.1.3 Print V8 command line options. ### `--v8-pool-size=num` + @@ -798,10 +869,11 @@ Set V8's thread pool size which will be used to allocate background jobs. If set to `0` then V8 will choose an appropriate size of the thread pool based on the number of online processors. -If the value provided is larger than V8's maximum, then the largest value -will be chosen. +If the value provided is larger than V8's maximum, then the largest value will +be chosen. ### `--zero-fill-buffers` + @@ -810,6 +882,7 @@ Automatically zero-fills all newly allocated [`Buffer`][] and [`SlowBuffer`][] instances. ### `-c`, `--check` + -Evaluate the following argument as JavaScript. The modules which are -predefined in the REPL can also be used in `script`. +Evaluate the following argument as JavaScript. The modules which are predefined +in the REPL can also be used in `script`. On Windows, using `cmd.exe` a single quote will not work correctly because it -only recognizes double `"` for quoting. In Powershell or Git bash, both `'` -and `"` are usable. +only recognizes double `"` for quoting. In Powershell or Git bash, both `'` and +`"` are usable. ### `-h`, `--help` + -Print node command line options. -The output of this option is less detailed than this document. +Print node command line options. The output of this option is less detailed than +this document. ### `-i`, `--interactive` + @@ -854,6 +930,7 @@ added: v0.7.7 Opens the REPL even if stdin does not appear to be a terminal. ### `-p`, `--print "script"` + Preload the specified module at startup. -Follows `require()`'s module resolution -rules. 
`module` may be either a path to a file, or a node module name. +Follows `require()`'s module resolution rules. `module` may be either a path to +a file, or a node module name. ### `-v`, `--version` + @@ -884,6 +963,7 @@ Print node's version. ## Environment Variables ### `NODE_DEBUG=module[,…]` + @@ -895,6 +975,7 @@ added: v0.1.32 `','`-separated list of core C++ modules that should print debug information. ### `NODE_DISABLE_COLORS=1` + @@ -902,6 +983,7 @@ added: v0.3.0 When set, colors will not be used in the REPL. ### `NODE_EXTRA_CA_CERTS=file` + @@ -909,16 +991,17 @@ added: v7.3.0 When set, the well known "root" CAs (like VeriSign) will be extended with the extra certificates in `file`. The file should consist of one or more trusted certificates in PEM format. A message will be emitted (once) with -[`process.emitWarning()`][emit_warning] if the file is missing or -malformed, but any errors are otherwise ignored. +[`process.emitWarning()`][emit_warning] if the file is missing or malformed, but +any errors are otherwise ignored. -Neither the well known nor extra certificates are used when the `ca` -options property is explicitly specified for a TLS or HTTPS client or server. +Neither the well known nor extra certificates are used when the `ca` options +property is explicitly specified for a TLS or HTTPS client or server. -This environment variable is ignored when `node` runs as setuid root or -has Linux file capabilities set. +This environment variable is ignored when `node` runs as setuid root or has +Linux file capabilities set. ### `NODE_ICU_DATA=file` + @@ -927,6 +1010,7 @@ Data path for ICU (`Intl` object) data. Will extend linked-in data when compiled with small-icu support. ### `NODE_NO_WARNINGS=1` + @@ -934,18 +1018,18 @@ added: v6.11.0 When set to `1`, process warnings are silenced. ### `NODE_OPTIONS=options...` + A space-separated list of command line options. 
`options...` are interpreted -before command line options, so command line options will override or -compound after anything in `options...`. Node.js will exit with an error if -an option that is not allowed in the environment is used, such as `-p` or a -script file. +before command line options, so command line options will override or compound +after anything in `options...`. Node.js will exit with an error if an option +that is not allowed in the environment is used, such as `-p` or a script file. -In case an option value happens to contain a space (for example a path listed -in `--require`), it must be escaped using double quotes. For example: +In case an option value happens to contain a space (for example a path listed in +`--require`), it must be escaped using double quotes. For example: ```bash NODE_OPTIONS='--require "./my path/file.js"' @@ -960,8 +1044,8 @@ NODE_OPTIONS='--inspect=localhost:4444' node --inspect=localhost:5555 ``` A flag that can be passed multiple times will be treated as if its -`NODE_OPTIONS` instances were passed first, and then its command line -instances afterwards: +`NODE_OPTIONS` instances were passed first, and then its command line instances +afterwards: ```bash NODE_OPTIONS='--require "./a.js"' node --require "./b.js" @@ -970,83 +1054,88 @@ node --require "./a.js" --require "./b.js" ``` Node.js options that are allowed are: + -* `--enable-fips` -* `--es-module-specifier-resolution` -* `--experimental-exports` -* `--experimental-modules` -* `--experimental-policy` -* `--experimental-repl-await` -* `--experimental-report` -* `--experimental-vm-modules` -* `--experimental-wasm-modules` -* `--force-fips` -* `--frozen-intrinsics` -* `--heapsnapshot-signal` -* `--http-parser` -* `--icu-data-dir` -* `--input-type` -* `--inspect-brk` -* `--inspect-port`, `--debug-port` -* `--inspect-publish-uid` -* `--inspect` -* `--loader` -* `--max-http-header-size` -* `--napi-modules` -* `--no-deprecation` -* `--no-force-async-hooks-checks` -* 
`--no-warnings` -* `--openssl-config` -* `--pending-deprecation` -* `--policy-integrity` -* `--preserve-symlinks-main` -* `--preserve-symlinks` -* `--prof-process` -* `--redirect-warnings` -* `--report-directory` -* `--report-filename` -* `--report-on-fatalerror` -* `--report-on-signal` -* `--report-signal` -* `--report-uncaught-exception` -* `--require`, `-r` -* `--throw-deprecation` -* `--title` -* `--tls-cipher-list` -* `--tls-max-v1.2` -* `--tls-max-v1.3` -* `--tls-min-v1.0` -* `--tls-min-v1.1` -* `--tls-min-v1.2` -* `--tls-min-v1.3` -* `--trace-deprecation` -* `--trace-event-categories` -* `--trace-event-file-pattern` -* `--trace-events-enabled` -* `--trace-sync-io` -* `--trace-tls` -* `--trace-warnings` -* `--track-heap-objects` -* `--unhandled-rejections` -* `--use-bundled-ca` -* `--use-openssl-ca` -* `--v8-pool-size` -* `--zero-fill-buffers` - + +- `--enable-fips` +- `--es-module-specifier-resolution` +- `--experimental-exports` +- `--experimental-modules` +- `--experimental-policy` +- `--experimental-repl-await` +- `--experimental-report` +- `--experimental-vm-modules` +- `--experimental-wasm-modules` +- `--force-fips` +- `--frozen-intrinsics` +- `--heapsnapshot-signal` +- `--http-parser` +- `--icu-data-dir` +- `--input-type` +- `--inspect-brk` +- `--inspect-port`, `--debug-port` +- `--inspect-publish-uid` +- `--inspect` +- `--loader` +- `--max-http-header-size` +- `--napi-modules` +- `--no-deprecation` +- `--no-force-async-hooks-checks` +- `--no-warnings` +- `--openssl-config` +- `--pending-deprecation` +- `--policy-integrity` +- `--preserve-symlinks-main` +- `--preserve-symlinks` +- `--prof-process` +- `--redirect-warnings` +- `--report-directory` +- `--report-filename` +- `--report-on-fatalerror` +- `--report-on-signal` +- `--report-signal` +- `--report-uncaught-exception` +- `--require`, `-r` +- `--throw-deprecation` +- `--title` +- `--tls-cipher-list` +- `--tls-max-v1.2` +- `--tls-max-v1.3` +- `--tls-min-v1.0` +- `--tls-min-v1.1` +- `--tls-min-v1.2` 
+- `--tls-min-v1.3` +- `--trace-deprecation` +- `--trace-event-categories` +- `--trace-event-file-pattern` +- `--trace-events-enabled` +- `--trace-sync-io` +- `--trace-tls` +- `--trace-warnings` +- `--track-heap-objects` +- `--unhandled-rejections` +- `--use-bundled-ca` +- `--use-openssl-ca` +- `--v8-pool-size` +- `--zero-fill-buffers` + V8 options that are allowed are: + -* `--abort-on-uncaught-exception` -* `--interpreted-frames-native-stack` -* `--max-old-space-size` -* `--perf-basic-prof-only-functions` -* `--perf-basic-prof` -* `--perf-prof-unwinding-info` -* `--perf-prof` -* `--stack-trace-limit` - + +- `--abort-on-uncaught-exception` +- `--interpreted-frames-native-stack` +- `--max-old-space-size` +- `--perf-basic-prof-only-functions` +- `--perf-basic-prof` +- `--perf-prof-unwinding-info` +- `--perf-prof` +- `--stack-trace-limit` + ### `NODE_PATH=path[:…]` + @@ -1056,6 +1145,7 @@ added: v0.1.32 On Windows, this is a `';'`-separated list instead. ### `NODE_PENDING_DEPRECATION=1` + @@ -1063,7 +1153,7 @@ added: v8.0.0 When set to `1`, emit pending deprecation warnings. Pending deprecations are generally identical to a runtime deprecation with the -notable exception that they are turned *off* by default and will not be emitted +notable exception that they are turned _off_ by default and will not be emitted unless either the `--pending-deprecation` command line flag, or the `NODE_PENDING_DEPRECATION=1` environment variable, is set. Pending deprecations are used to provide a kind of selective "early warning" mechanism that @@ -1075,6 +1165,7 @@ Set the number of pending pipe instance handles when the pipe server is waiting for connections. This setting applies to Windows only. ### `NODE_PRESERVE_SYMLINKS=1` + @@ -1083,17 +1174,19 @@ When set to `1`, instructs the module loader to preserve symbolic links when resolving and caching modules. 
### `NODE_REDIRECT_WARNINGS=file` + -When set, process warnings will be emitted to the given file instead of -printing to stderr. The file will be created if it does not exist, and will be -appended to if it does. If an error occurs while attempting to write the -warning to the file, the warning will be written to stderr instead. This is -equivalent to using the `--redirect-warnings=file` command-line flag. +When set, process warnings will be emitted to the given file instead of printing +to stderr. The file will be created if it does not exist, and will be appended +to if it does. If an error occurs while attempting to write the warning to the +file, the warning will be written to stderr instead. This is equivalent to using +the `--redirect-warnings=file` command-line flag. ### `NODE_REPL_HISTORY=file` + @@ -1135,18 +1228,20 @@ At this time coverage is only collected in the main thread and will not be output for code executed by worker threads. ### `OPENSSL_CONF=file` + Load an OpenSSL configuration file on startup. Among other uses, this can be -used to enable FIPS-compliant crypto if Node.js is built with `./configure ---openssl-fips`. +used to enable FIPS-compliant crypto if Node.js is built with +`./configure --openssl-fips`. If the [`--openssl-config`][] command line option is used, the environment variable is ignored. ### `SSL_CERT_DIR=dir` + @@ -1159,6 +1254,7 @@ variable will be inherited by any child processes, and if they use OpenSSL, it may cause them to trust the same CAs as node. ### `SSL_CERT_FILE=file` + @@ -1178,12 +1274,12 @@ Asynchronous system APIs are used by Node.js whenever possible, but where they do not exist, libuv's threadpool is used to create asynchronous node APIs based on synchronous system APIs. 
Node.js APIs that use the threadpool are: -* all `fs` APIs, other than the file watcher APIs and those that are explicitly +- all `fs` APIs, other than the file watcher APIs and those that are explicitly synchronous -* asynchronous crypto APIs such as `crypto.pbkdf2()`, `crypto.scrypt()`, +- asynchronous crypto APIs such as `crypto.pbkdf2()`, `crypto.scrypt()`, `crypto.randomBytes()`, `crypto.randomFill()`, `crypto.generateKeyPair()` -* `dns.lookup()` -* all `zlib` APIs, other than those that are explicitly synchronous +- `dns.lookup()` +- all `zlib` APIs, other than those that are explicitly synchronous Because libuv's threadpool has a fixed size, it means that if for whatever reason any of these APIs takes a long time, other (seemingly unrelated) APIs @@ -1194,21 +1290,28 @@ greater than `4` (its current default value). For more information, see the [libuv threadpool documentation][]. [`--openssl-config`]: #cli_openssl_config_file -[`Buffer`]: buffer.html#buffer_class_buffer -[`SlowBuffer`]: buffer.html#buffer_class_slowbuffer -[`process.setUncaughtExceptionCaptureCallback()`]: process.html#process_process_setuncaughtexceptioncapturecallback_fn -[`tls.DEFAULT_MAX_VERSION`]: tls.html#tls_tls_default_max_version -[`tls.DEFAULT_MIN_VERSION`]: tls.html#tls_tls_default_min_version -[`unhandledRejection`]: process.html#process_event_unhandledrejection -[Chrome DevTools Protocol]: https://chromedevtools.github.io/devtools-protocol/ -[REPL]: repl.html -[ScriptCoverage]: https://chromedevtools.github.io/devtools-protocol/tot/Profiler#type-ScriptCoverage -[Subresource Integrity]: https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity -[V8 JavaScript code coverage]: https://v8project.blogspot.com/2017/12/javascript-code-coverage.html -[customizing esm specifier resolution]: esm.html#esm_customizing_esm_specifier_resolution_algorithm +[`buffer`]: buffer.html#buffer_class_buffer +[`slowbuffer`]: buffer.html#buffer_class_slowbuffer 
+[`process.setuncaughtexceptioncapturecallback()`]: + process.html#process_process_setuncaughtexceptioncapturecallback_fn +[`tls.default_max_version`]: tls.html#tls_tls_default_max_version +[`tls.default_min_version`]: tls.html#tls_tls_default_min_version +[`unhandledrejection`]: process.html#process_event_unhandledrejection +[chrome devtools protocol]: https://chromedevtools.github.io/devtools-protocol/ +[repl]: repl.html +[scriptcoverage]: + https://chromedevtools.github.io/devtools-protocol/tot/Profiler#type-ScriptCoverage +[subresource integrity]: + https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity +[v8 javascript code coverage]: + https://v8project.blogspot.com/2017/12/javascript-code-coverage.html +[customizing esm specifier resolution]: + esm.html#esm_customizing_esm_specifier_resolution_algorithm [debugger]: debugger.html -[debugging security implications]: https://nodejs.org/en/docs/guides/debugging-getting-started/#security-implications +[debugging security implications]: + https://nodejs.org/en/docs/guides/debugging-getting-started/#security-implications [emit_warning]: process.html#process_process_emitwarning_warning_type_code_ctor -[experimental ECMAScript Module]: esm.html#esm_resolve_hook -[libuv threadpool documentation]: http://docs.libuv.org/en/latest/threadpool.html +[experimental ecmascript module]: esm.html#esm_resolve_hook +[libuv threadpool documentation]: + http://docs.libuv.org/en/latest/threadpool.html [remote code execution]: https://www.owasp.org/index.php/Code_Injection diff --git a/doc/api/deprecations.md b/doc/api/deprecations.md index 46d2467c7234b8..9e60a25486005c 100644 --- a/doc/api/deprecations.md +++ b/doc/api/deprecations.md @@ -5,27 +5,27 @@ Node.js may deprecate APIs for any of the following reasons: -* Use of the API is unsafe. -* An improved alternative API is available. -* Breaking changes to the API are expected in a future major release. +- Use of the API is unsafe. 
+- An improved alternative API is available. +- Breaking changes to the API are expected in a future major release. Node.js utilizes three kinds of Deprecations: -* Documentation-only -* Runtime -* End-of-Life +- Documentation-only +- Runtime +- End-of-Life A Documentation-only deprecation is one that is expressed only within the -Node.js API docs. These generate no side-effects while running Node.js. -Some Documentation-only deprecations trigger a runtime warning when launched -with [`--pending-deprecation`][] flag (or its alternative, +Node.js API docs. These generate no side-effects while running Node.js. Some +Documentation-only deprecations trigger a runtime warning when launched with +[`--pending-deprecation`][] flag (or its alternative, `NODE_PENDING_DEPRECATION=1` environment variable), similarly to Runtime -deprecations below. Documentation-only deprecations that support that flag -are explicitly labeled as such in the +deprecations below. Documentation-only deprecations that support that flag are +explicitly labeled as such in the [list of Deprecated APIs](#deprecations_list_of_deprecated_apis). -A Runtime deprecation will, by default, generate a process warning that will -be printed to `stderr` the first time the deprecated API is used. When the +A Runtime deprecation will, by default, generate a process warning that will be +printed to `stderr` the first time the deprecated API is used. When the [`--throw-deprecation`][] command-line flag is used, a Runtime deprecation will cause an error to be thrown. @@ -41,7 +41,9 @@ However, the deprecation identifier will not be modified. ## List of Deprecated APIs + ### DEP0001: http.OutgoingMessage.prototype.flush + @@ -30,22 +31,22 @@ continue. The listener callback function is invoked with the value of [`process.exitCode`][] passed as the only argument. 
-The `'beforeExit'` event is *not* emitted for conditions causing explicit +The `'beforeExit'` event is _not_ emitted for conditions causing explicit termination, such as calling [`process.exit()`][] or uncaught exceptions. -The `'beforeExit'` should *not* be used as an alternative to the `'exit'` event +The `'beforeExit'` should _not_ be used as an alternative to the `'exit'` event unless the intention is to schedule additional work. ```js -process.on('beforeExit', (code) => { - console.log('Process beforeExit event with code: ', code); +process.on("beforeExit", code => { + console.log("Process beforeExit event with code: ", code); }); -process.on('exit', (code) => { - console.log('Process exit event with code: ', code); +process.on("exit", code => { + console.log("Process exit event with code: ", code); }); -console.log('This message is displayed first.'); +console.log("This message is displayed first."); // Prints: // This message is displayed first. @@ -54,6 +55,7 @@ console.log('This message is displayed first.'); ``` ### Event: 'disconnect' + @@ -63,52 +65,54 @@ and [Cluster][] documentation), the `'disconnect'` event will be emitted when the IPC channel is closed. ### Event: 'exit' + -* `code` {integer} +- `code` {integer} The `'exit'` event is emitted when the Node.js process is about to exit as a result of either: -* The `process.exit()` method being called explicitly; -* The Node.js event loop no longer having any additional work to perform. +- The `process.exit()` method being called explicitly; +- The Node.js event loop no longer having any additional work to perform. There is no way to prevent the exiting of the event loop at this point, and once all `'exit'` listeners have finished running the Node.js process will terminate. 
-The listener callback function is invoked with the exit code specified either -by the [`process.exitCode`][] property, or the `exitCode` argument passed to the +The listener callback function is invoked with the exit code specified either by +the [`process.exitCode`][] property, or the `exitCode` argument passed to the [`process.exit()`] method. ```js -process.on('exit', (code) => { +process.on("exit", code => { console.log(`About to exit with code: ${code}`); }); ``` Listener functions **must** only perform **synchronous** operations. The Node.js -process will exit immediately after calling the `'exit'` event listeners -causing any additional work still queued in the event loop to be abandoned. -In the following example, for instance, the timeout will never occur: +process will exit immediately after calling the `'exit'` event listeners causing +any additional work still queued in the event loop to be abandoned. In the +following example, for instance, the timeout will never occur: ```js -process.on('exit', (code) => { +process.on("exit", code => { setTimeout(() => { - console.log('This will not run'); + console.log("This will not run"); }, 0); }); ``` ### Event: 'message' + -* `message` { Object | boolean | number | string | null } a parsed JSON object +- `message` { Object | boolean | number | string | null } a parsed JSON object or a serializable primitive value. -* `sendHandle` {net.Server|net.Socket} a [`net.Server`][] or [`net.Socket`][] +- `sendHandle` {net.Server|net.Socket} a [`net.Server`][] or [`net.Socket`][] object, or undefined. If the Node.js process is spawned with an IPC channel (see the [Child Process][] @@ -120,21 +124,22 @@ The message goes through serialization and parsing. The resulting message might not be the same as what is originally sent. ### Event: 'multipleResolves' + -* `type` {string} The resolution type. One of `'resolve'` or `'reject'`. -* `promise` {Promise} The promise that resolved or rejected more than once. 
-* `value` {any} The value with which the promise was either resolved or - rejected after the original resolve. +- `type` {string} The resolution type. One of `'resolve'` or `'reject'`. +- `promise` {Promise} The promise that resolved or rejected more than once. +- `value` {any} The value with which the promise was either resolved or rejected + after the original resolve. The `'multipleResolves'` event is emitted whenever a `Promise` has been either: -* Resolved more than once. -* Rejected more than once. -* Rejected after resolve. -* Resolved after reject. +- Resolved more than once. +- Rejected more than once. +- Rejected after resolve. +- Resolved after reject. This is useful for tracking potential errors in an application while using the `Promise` constructor, as multiple resolutions are silently swallowed. However, @@ -142,7 +147,7 @@ the occurrence of this event does not necessarily indicate an error. For example, [`Promise.race()`][] can trigger a `'multipleResolves'` event. ```js -process.on('multipleResolves', (type, promise, reason) => { +process.on("multipleResolves", (type, promise, reason) => { console.error(type, promise, reason); setImmediate(() => process.exit(1)); }); @@ -150,12 +155,12 @@ process.on('multipleResolves', (type, promise, reason) => { async function main() { try { return await new Promise((resolve, reject) => { - resolve('First call'); - resolve('Swallowed resolve'); - reject(new Error('Swallowed reject')); + resolve("First call"); + resolve("Swallowed resolve"); + reject(new Error("Swallowed reject")); }); } catch { - throw new Error('Failed'); + throw new Error("Failed"); } } @@ -169,11 +174,12 @@ main().then(console.log); ``` ### Event: 'rejectionHandled' + -* `promise` {Promise} The late handled promise. +- `promise` {Promise} The late handled promise. 
The `'rejectionHandled'` event is emitted whenever a `Promise` has been rejected and an error handler was attached to it (using [`promise.catch()`][], for @@ -201,10 +207,10 @@ when the list of unhandled rejections shrinks. ```js const unhandledRejections = new Map(); -process.on('unhandledRejection', (reason, promise) => { +process.on("unhandledRejection", (reason, promise) => { unhandledRejections.set(promise, reason); }); -process.on('rejectionHandled', (promise) => { +process.on("rejectionHandled", promise => { unhandledRejections.delete(promise); }); ``` @@ -216,6 +222,7 @@ likely best for long-running application) or upon process exit (which is likely most convenient for scripts). ### Event: 'uncaughtException' + -* `err` {Error} The uncaught exception. -* `origin` {string} Indicates if the exception originates from an unhandled +- `err` {Error} The uncaught exception. +- `origin` {string} Indicates if the exception originates from an unhandled rejection or from synchronous errors. Can either be `'uncaughtException'` or `'unhandledRejection'`. -The `'uncaughtException'` event is emitted when an uncaught JavaScript -exception bubbles all the way back to the event loop. By default, Node.js -handles such exceptions by printing the stack trace to `stderr` and exiting -with code 1, overriding any previously set [`process.exitCode`][]. -Adding a handler for the `'uncaughtException'` event overrides this default -behavior. Alternatively, change the [`process.exitCode`][] in the -`'uncaughtException'` handler which will result in the process exiting with the -provided exit code. Otherwise, in the presence of such handler the process will -exit with 0. +The `'uncaughtException'` event is emitted when an uncaught JavaScript exception +bubbles all the way back to the event loop. By default, Node.js handles such +exceptions by printing the stack trace to `stderr` and exiting with code 1, +overriding any previously set [`process.exitCode`][]. 
Adding a handler for the +`'uncaughtException'` event overrides this default behavior. Alternatively, +change the [`process.exitCode`][] in the `'uncaughtException'` handler which +will result in the process exiting with the provided exit code. Otherwise, in +the presence of such handler the process will exit with 0. ```js -process.on('uncaughtException', (err, origin) => { +process.on("uncaughtException", (err, origin) => { fs.writeSync( process.stderr.fd, - `Caught exception: ${err}\n` + - `Exception origin: ${origin}` + `Caught exception: ${err}\n` + `Exception origin: ${origin}` ); }); setTimeout(() => { - console.log('This will still run.'); + console.log("This will still run."); }, 500); // Intentionally cause an exception, but don't catch it. nonexistentFunc(); -console.log('This will not run.'); +console.log("This will not run."); ``` #### Warning: Using `'uncaughtException'` correctly -`'uncaughtException'` is a crude mechanism for exception handling -intended to be used only as a last resort. The event *should not* be used as -an equivalent to `On Error Resume Next`. Unhandled exceptions inherently mean -that an application is in an undefined state. Attempting to resume application -code without properly recovering from the exception can cause additional -unforeseen and unpredictable issues. +`'uncaughtException'` is a crude mechanism for exception handling intended to be +used only as a last resort. The event _should not_ be used as an equivalent to +`On Error Resume Next`. Unhandled exceptions inherently mean that an application +is in an undefined state. Attempting to resume application code without properly +recovering from the exception can cause additional unforeseen and unpredictable +issues. Exceptions thrown from within the event handler will not be caught. Instead the process will exit with a non-zero exit code and the stack trace will be printed. This is to avoid infinite recursion. 
Attempting to resume normally after an uncaught exception can be similar to -pulling out of the power cord when upgrading a computer — nine out of ten -times nothing happens - but the 10th time, the system becomes corrupted. +pulling out of the power cord when upgrading a computer — nine out of ten times +nothing happens - but the 10th time, the system becomes corrupted. -The correct use of `'uncaughtException'` is to perform synchronous cleanup -of allocated resources (e.g. file descriptors, handles, etc) before shutting -down the process. **It is not safe to resume normal operation after +The correct use of `'uncaughtException'` is to perform synchronous cleanup of +allocated resources (e.g. file descriptors, handles, etc) before shutting down +the process. **It is not safe to resume normal operation after `'uncaughtException'`.** To restart a crashed application in a more reliable way, whether @@ -285,6 +290,7 @@ in a separate process to detect application failures and recover or restart as needed. ### Event: 'unhandledRejection' + -* `reason` {Error|any} The object with which the promise was rejected - (typically an [`Error`][] object). -* `promise` {Promise} The rejected promise. +- `reason` {Error|any} The object with which the promise was rejected (typically + an [`Error`][] object). +- `promise` {Promise} The rejected promise. The `'unhandledRejection'` event is emitted whenever a `Promise` is rejected and no error handler is attached to the promise within a turn of the event loop. @@ -310,23 +316,22 @@ useful for detecting and keeping track of promises that were rejected whose rejections have not yet been handled. 
```js -process.on('unhandledRejection', (reason, promise) => { - console.log('Unhandled Rejection at:', promise, 'reason:', reason); +process.on("unhandledRejection", (reason, promise) => { + console.log("Unhandled Rejection at:", promise, "reason:", reason); // Application specific logging, throwing an error, or other logic here }); -somePromise.then((res) => { +somePromise.then(res => { return reportToUser(JSON.pasre(res)); // Note the typo (`pasre`) }); // No `.catch()` or `.then()` ``` -The following will also trigger the `'unhandledRejection'` event to be -emitted: +The following will also trigger the `'unhandledRejection'` event to be emitted: ```js function SomeResource() { // Initially set the loaded status to a rejected promise - this.loaded = Promise.reject(new Error('Resource not yet loaded!')); + this.loaded = Promise.reject(new Error("Resource not yet loaded!")); } const resource = new SomeResource(); @@ -341,29 +346,30 @@ address such failures, a non-operational being emitted. ### Event: 'warning' + -* `warning` {Error} Key properties of the warning are: - * `name` {string} The name of the warning. **Default:** `'Warning'`. - * `message` {string} A system-provided description of the warning. - * `stack` {string} A stack trace to the location in the code where the warning +- `warning` {Error} Key properties of the warning are: + - `name` {string} The name of the warning. **Default:** `'Warning'`. + - `message` {string} A system-provided description of the warning. + - `stack` {string} A stack trace to the location in the code where the warning was issued. The `'warning'` event is emitted whenever Node.js emits a process warning. A process warning is similar to an error in that it describes exceptional -conditions that are being brought to the user's attention. However, warnings -are not part of the normal Node.js and JavaScript error handling flow. 
-Node.js can emit warnings whenever it detects bad coding practices that could -lead to sub-optimal application performance, bugs, or security vulnerabilities. +conditions that are being brought to the user's attention. However, warnings are +not part of the normal Node.js and JavaScript error handling flow. Node.js can +emit warnings whenever it detects bad coding practices that could lead to +sub-optimal application performance, bugs, or security vulnerabilities. ```js -process.on('warning', (warning) => { - console.warn(warning.name); // Print the warning name +process.on("warning", warning => { + console.warn(warning.name); // Print the warning name console.warn(warning.message); // Print the warning message - console.warn(warning.stack); // Print the stack trace + console.warn(warning.stack); // Print the stack trace }); ``` @@ -383,8 +389,8 @@ $ node detected. 2 foo listeners added. Use emitter.setMaxListeners() to increase limit ``` -In contrast, the following example turns off the default warning output and -adds a custom handler to the `'warning'` event: +In contrast, the following example turns off the default warning output and adds +a custom handler to the `'warning'` event: ```console $ node --no-warnings @@ -398,14 +404,14 @@ $ node --no-warnings The `--trace-warnings` command-line option can be used to have the default console output for warnings include the full stack trace of the warning. -Launching Node.js using the `--throw-deprecation` command line flag will -cause custom deprecation warnings to be thrown as exceptions. +Launching Node.js using the `--throw-deprecation` command line flag will cause +custom deprecation warnings to be thrown as exceptions. Using the `--trace-deprecation` command line flag will cause the custom deprecation to be printed to `stderr` along with the stack trace. -Using the `--no-deprecation` command line flag will suppress all reporting -of the custom deprecation. 
+Using the `--no-deprecation` command line flag will suppress all reporting of +the custom deprecation. The `*-deprecation` command line flags only affect warnings that use the name `'DeprecationWarning'`. @@ -424,8 +430,8 @@ Signal events will be emitted when the Node.js process receives a signal. Please refer to signal(7) for a listing of standard POSIX signal names such as `'SIGINT'`, `'SIGHUP'`, etc. -The signal handler will receive the signal's name (`'SIGINT'`, - `'SIGTERM'`, etc.) as the first argument. +The signal handler will receive the signal's name (`'SIGINT'`, `'SIGTERM'`, +etc.) as the first argument. The name of each event will be the uppercase common name for the signal (e.g. `'SIGINT'` for `SIGINT` signals). @@ -434,8 +440,8 @@ The name of each event will be the uppercase common name for the signal (e.g. // Begin reading from stdin so the process does not exit. process.stdin.resume(); -process.on('SIGINT', () => { - console.log('Received SIGINT. Press Control-D to exit.'); +process.on("SIGINT", () => { + console.log("Received SIGINT. Press Control-D to exit."); }); // Using a single function to handle multiple signals @@ -443,49 +449,50 @@ function handle(signal) { console.log(`Received ${signal}`); } -process.on('SIGINT', handle); -process.on('SIGTERM', handle); +process.on("SIGINT", handle); +process.on("SIGTERM", handle); ``` -* `'SIGUSR1'` is reserved by Node.js to start the [debugger][]. It's possible to +- `'SIGUSR1'` is reserved by Node.js to start the [debugger][]. It's possible to install a listener but doing so might interfere with the debugger. -* `'SIGTERM'` and `'SIGINT'` have default handlers on non-Windows platforms that +- `'SIGTERM'` and `'SIGINT'` have default handlers on non-Windows platforms that reset the terminal mode before exiting with code `128 + signal number`. If one of these signals has a listener installed, its default behavior will be removed (Node.js will no longer exit). -* `'SIGPIPE'` is ignored by default. 
It can have a listener installed.
-`'SIGHUP'` is generated on Windows when the console window is closed, and on
+- `'SIGPIPE'` is ignored by default. It can have a listener installed.
+- `'SIGHUP'` is generated on Windows when the console window is closed, and on
  other platforms under various similar conditions. See signal(7). It can have
  a listener installed, however Node.js will be unconditionally terminated by
-  Windows about 10 seconds later. On non-Windows platforms, the default
-  behavior of `SIGHUP` is to terminate Node.js, but once a listener has been
-  installed its default behavior will be removed.
-* `'SIGTERM'` is not supported on Windows, it can be listened on.
-* `'SIGINT'` from the terminal is supported on all platforms, and can usually be
+  Windows about 10 seconds later. On non-Windows platforms, the default behavior
+  of `SIGHUP` is to terminate Node.js, but once a listener has been installed
+  its default behavior will be removed.
+- `'SIGTERM'` is not supported on Windows, it can be listened on.
+- `'SIGINT'` from the terminal is supported on all platforms, and can usually be
  generated with `<Ctrl>+C` (though this may be configurable). It is not generated
  when terminal raw mode is enabled.
-* `'SIGBREAK'` is delivered on Windows when `<Ctrl>+<Break>` is pressed, on
+- `'SIGBREAK'` is delivered on Windows when `<Ctrl>+<Break>` is pressed, on
  non-Windows platforms it can be listened on, but there is no way to send or
  generate it.
-* `'SIGWINCH'` is delivered when the console has been resized. On Windows, this
+- `'SIGWINCH'` is delivered when the console has been resized. On Windows, this
  will only happen on write to the console when the cursor is being moved, or
  when a readable tty is used in raw mode.
-* `'SIGKILL'` cannot have a listener installed, it will unconditionally
+- `'SIGKILL'` cannot have a listener installed, it will unconditionally
  terminate Node.js on all platforms.
-* `'SIGSTOP'` cannot have a listener installed. 
-* `'SIGBUS'`, `'SIGFPE'`, `'SIGSEGV'` and `'SIGILL'`, when not raised - artificially using kill(2), inherently leave the process in a state from - which it is not safe to attempt to call JS listeners. Doing so might lead to - the process hanging in an endless loop, since listeners attached using - `process.on()` are called asynchronously and therefore unable to correct the - underlying problem. - -Windows does not support sending signals, but Node.js offers some emulation -with [`process.kill()`][], and [`subprocess.kill()`][]. Sending signal `0` can -be used to test for the existence of a process. Sending `SIGINT`, `SIGTERM`, -and `SIGKILL` cause the unconditional termination of the target process. +- `'SIGSTOP'` cannot have a listener installed. +- `'SIGBUS'`, `'SIGFPE'`, `'SIGSEGV'` and `'SIGILL'`, when not raised + artificially using kill(2), inherently leave the process in a state from which + it is not safe to attempt to call JS listeners. Doing so might lead to the + process hanging in an endless loop, since listeners attached using + `process.on()` are called asynchronously and therefore unable to correct the + underlying problem. + +Windows does not support sending signals, but Node.js offers some emulation with +[`process.kill()`][], and [`subprocess.kill()`][]. Sending signal `0` can be +used to test for the existence of a process. Sending `SIGINT`, `SIGTERM`, and +`SIGKILL` cause the unconditional termination of the target process. ## process.abort() + @@ -496,39 +503,37 @@ generate a core file. This feature is not available in [`Worker`][] threads. ## process.allowedNodeEnvironmentFlags + -* {Set} +- {Set} -The `process.allowedNodeEnvironmentFlags` property is a special, -read-only `Set` of flags allowable within the [`NODE_OPTIONS`][] -environment variable. +The `process.allowedNodeEnvironmentFlags` property is a special, read-only `Set` +of flags allowable within the [`NODE_OPTIONS`][] environment variable. 
`process.allowedNodeEnvironmentFlags` extends `Set`, but overrides `Set.prototype.has` to recognize several different possible flag -representations. `process.allowedNodeEnvironmentFlags.has()` will -return `true` in the following cases: +representations. `process.allowedNodeEnvironmentFlags.has()` will return `true` +in the following cases: -* Flags may omit leading single (`-`) or double (`--`) dashes; e.g., +- Flags may omit leading single (`-`) or double (`--`) dashes; e.g., `inspect-brk` for `--inspect-brk`, or `r` for `-r`. -* Flags passed through to V8 (as listed in `--v8-options`) may replace - one or more *non-leading* dashes for an underscore, or vice-versa; - e.g., `--perf_basic_prof`, `--perf-basic-prof`, `--perf_basic-prof`, - etc. -* Flags may contain one or more equals (`=`) characters; all - characters after and including the first equals will be ignored; - e.g., `--stack-trace-limit=100`. -* Flags *must* be allowable within [`NODE_OPTIONS`][]. - -When iterating over `process.allowedNodeEnvironmentFlags`, flags will -appear only *once*; each will begin with one or more dashes. Flags -passed through to V8 will contain underscores instead of non-leading -dashes: - -```js -process.allowedNodeEnvironmentFlags.forEach((flag) => { +- Flags passed through to V8 (as listed in `--v8-options`) may replace one or + more _non-leading_ dashes for an underscore, or vice-versa; e.g., + `--perf_basic_prof`, `--perf-basic-prof`, `--perf_basic-prof`, etc. +- Flags may contain one or more equals (`=`) characters; all characters after + and including the first equals will be ignored; e.g., + `--stack-trace-limit=100`. +- Flags _must_ be allowable within [`NODE_OPTIONS`][]. + +When iterating over `process.allowedNodeEnvironmentFlags`, flags will appear +only _once_; each will begin with one or more dashes. 
Flags passed through to V8 +will contain underscores instead of non-leading dashes: + +```js +process.allowedNodeEnvironmentFlags.forEach(flag => { // -r // --inspect-brk // --abort_on_uncaught_exception @@ -537,19 +542,19 @@ process.allowedNodeEnvironmentFlags.forEach((flag) => { ``` The methods `add()`, `clear()`, and `delete()` of -`process.allowedNodeEnvironmentFlags` do nothing, and will fail -silently. +`process.allowedNodeEnvironmentFlags` do nothing, and will fail silently. -If Node.js was compiled *without* [`NODE_OPTIONS`][] support (shown in -[`process.config`][]), `process.allowedNodeEnvironmentFlags` will -contain what *would have* been allowable. +If Node.js was compiled _without_ [`NODE_OPTIONS`][] support (shown in +[`process.config`][]), `process.allowedNodeEnvironmentFlags` will contain what +_would have_ been allowable. ## process.arch + -* {string} +- {string} The `process.arch` property returns a string identifying the operating system CPU architecture for which the Node.js binary was compiled. @@ -562,17 +567,18 @@ console.log(`This processor architecture is ${process.arch}`); ``` ## process.argv + -* {string[]} +- {string[]} The `process.argv` property returns an array containing the command line arguments passed when the Node.js process was launched. The first element will be [`process.execPath`]. See `process.argv0` if access to the original value of -`argv[0]` is needed. The second element will be the path to the JavaScript -file being executed. The remaining elements will be any additional command line +`argv[0]` is needed. The second element will be the path to the JavaScript file +being executed. The remaining elements will be any additional command line arguments. 
For example, assuming the following script for `process-args.js`: @@ -601,11 +607,12 @@ Would generate the output: ``` ## process.argv0 + -* {string} +- {string} The `process.argv0` property stores a read-only copy of the original value of `argv[0]` passed when Node.js starts. @@ -619,32 +626,33 @@ $ bash -c 'exec -a customArgv0 ./node' ``` ## process.channel + -* {Object} +- {Object} -If the Node.js process was spawned with an IPC channel (see the -[Child Process][] documentation), the `process.channel` -property is a reference to the IPC channel. If no IPC channel exists, this -property is `undefined`. +If the Node.js process was spawned with an IPC channel (see the [Child +Process][] documentation), the `process.channel` property is a reference to the +IPC channel. If no IPC channel exists, this property is `undefined`. ## process.chdir(directory) + -* `directory` {string} +- `directory` {string} The `process.chdir()` method changes the current working directory of the -Node.js process or throws an exception if doing so fails (for instance, if -the specified `directory` does not exist). +Node.js process or throws an exception if doing so fails (for instance, if the +specified `directory` does not exist). ```js console.log(`Starting directory: ${process.cwd()}`); try { - process.chdir('/tmp'); + process.chdir("/tmp"); console.log(`New directory: ${process.cwd()}`); } catch (err) { console.error(`chdir: ${err}`); @@ -654,11 +662,12 @@ try { This feature is not available in [`Worker`][] threads. ## process.config + -* {Object} +- {Object} The `process.config` property returns an `Object` containing the JavaScript representation of the configure options used to compile the current Node.js @@ -668,6 +677,7 @@ running the `./configure` script. An example of the possible output looks like: + ```js { target_defaults: @@ -701,11 +711,12 @@ modules in the ecosystem that are known to extend, modify, or entirely replace the value of `process.config`. 
## process.connected + -* {boolean} +- {boolean} If the Node.js process is spawned with an IPC channel (see the [Child Process][] and [Cluster][] documentation), the `process.connected` property will return @@ -716,15 +727,16 @@ Once `process.connected` is `false`, it is no longer possible to send messages over the IPC channel using `process.send()`. ## process.cpuUsage([previousValue]) + -* `previousValue` {Object} A previous return value from calling +- `previousValue` {Object} A previous return value from calling `process.cpuUsage()` -* Returns: {Object} - * `user` {integer} - * `system` {integer} +- Returns: {Object} + - `user` {integer} + - `system` {integer} The `process.cpuUsage()` method returns the user and system CPU time usage of the current process, in an object with properties `user` and `system`, whose @@ -748,11 +760,12 @@ console.log(process.cpuUsage(startUsage)); ``` ## process.cwd() + -* Returns: {string} +- Returns: {string} The `process.cwd()` method returns the current working directory of the Node.js process. @@ -762,11 +775,12 @@ console.log(`Current directory: ${process.cwd()}`); ``` ## process.debugPort + -* {number} +- {number} The port used by Node.js's debugger when enabled. @@ -775,6 +789,7 @@ process.debugPort = 5858; ``` ## process.disconnect() + @@ -791,6 +806,7 @@ If the Node.js process was not spawned with an IPC channel, `process.disconnect()` will be `undefined`. ## process.dlopen(module, filename[, flags]) + -* `module` {Object} -* `filename` {string} -* `flags` {os.constants.dlopen} **Default:** `os.constants.dlopen.RTLD_LAZY` +- `module` {Object} +- `filename` {string} +- `flags` {os.constants.dlopen} **Default:** `os.constants.dlopen.RTLD_LAZY` -The `process.dlopen()` method allows to dynamically load shared -objects. It is primarily used by `require()` to load -C++ Addons, and should not be used directly, except in special -cases. 
In other words, [`require()`][] should be preferred over -`process.dlopen()`, unless there are specific reasons. +The `process.dlopen()` method allows to dynamically load shared objects. It is +primarily used by `require()` to load C++ Addons, and should not be used +directly, except in special cases. In other words, [`require()`][] should be +preferred over `process.dlopen()`, unless there are specific reasons. -The `flags` argument is an integer that allows to specify dlopen -behavior. See the [`os.constants.dlopen`][] documentation for details. +The `flags` argument is an integer that allows to specify dlopen behavior. See +the [`os.constants.dlopen`][] documentation for details. -If there are specific reasons to use `process.dlopen()` (for instance, -to specify dlopen flags), it's often useful to use [`require.resolve()`][] -to look up the module's path. +If there are specific reasons to use `process.dlopen()` (for instance, to +specify dlopen flags), it's often useful to use [`require.resolve()`][] to look +up the module's path. An important drawback when calling `process.dlopen()` is that the `module` instance must be passed. Functions exported by the C++ Addon will be accessible via `module.exports`. -The example below shows how to load a C++ Addon, named as `binding`, -that exports a `foo` function. All the symbols will be loaded before -the call returns, by passing the `RTLD_NOW` constant. In this example -the constant is assumed to be available. +The example below shows how to load a C++ Addon, named as `binding`, that +exports a `foo` function. All the symbols will be loaded before the call +returns, by passing the `RTLD_NOW` constant. In this example the constant is +assumed to be available. 
```js -const os = require('os'); -process.dlopen(module, require.resolve('binding'), - os.constants.dlopen.RTLD_NOW); +const os = require("os"); +process.dlopen( + module, + require.resolve("binding"), + os.constants.dlopen.RTLD_NOW +); module.exports.foo(); ``` ## process.emitWarning(warning[, options]) + -* `warning` {string|Error} The warning to emit. -* `options` {Object} - * `type` {string} When `warning` is a `String`, `type` is the name to use - for the *type* of warning being emitted. **Default:** `'Warning'`. - * `code` {string} A unique identifier for the warning instance being emitted. - * `ctor` {Function} When `warning` is a `String`, `ctor` is an optional +- `warning` {string|Error} The warning to emit. +- `options` {Object} + - `type` {string} When `warning` is a `String`, `type` is the name to use for + the _type_ of warning being emitted. **Default:** `'Warning'`. + - `code` {string} A unique identifier for the warning instance being emitted. + - `ctor` {Function} When `warning` is a `String`, `ctor` is an optional function used to limit the generated stack trace. **Default:** `process.emitWarning`. - * `detail` {string} Additional text to include with the error. + - `detail` {string} Additional text to include with the error. The `process.emitWarning()` method can be used to emit custom or application specific process warnings. These can be listened for by adding a handler to the @@ -853,9 +872,9 @@ specific process warnings. These can be listened for by adding a handler to the ```js // Emit a warning with a code and additional detail. -process.emitWarning('Something happened!', { - code: 'MY_WARNING', - detail: 'This is some additional information' +process.emitWarning("Something happened!", { + code: "MY_WARNING", + detail: "This is some additional information" }); // Emits: // (node:56338) [MY_WARNING] Warning: Something happened! 
@@ -863,33 +882,33 @@ process.emitWarning('Something happened!', { ``` In this example, an `Error` object is generated internally by -`process.emitWarning()` and passed through to the -[`'warning'`][process_warning] handler. +`process.emitWarning()` and passed through to the [`'warning'`][process_warning] +handler. ```js -process.on('warning', (warning) => { - console.warn(warning.name); // 'Warning' +process.on("warning", warning => { + console.warn(warning.name); // 'Warning' console.warn(warning.message); // 'Something happened!' - console.warn(warning.code); // 'MY_WARNING' - console.warn(warning.stack); // Stack trace - console.warn(warning.detail); // 'This is some additional information' + console.warn(warning.code); // 'MY_WARNING' + console.warn(warning.stack); // Stack trace + console.warn(warning.detail); // 'This is some additional information' }); ``` If `warning` is passed as an `Error` object, the `options` argument is ignored. ## process.emitWarning(warning[, type[, code]][, ctor]) + -* `warning` {string|Error} The warning to emit. -* `type` {string} When `warning` is a `String`, `type` is the name to use - for the *type* of warning being emitted. **Default:** `'Warning'`. -* `code` {string} A unique identifier for the warning instance being emitted. -* `ctor` {Function} When `warning` is a `String`, `ctor` is an optional - function used to limit the generated stack trace. **Default:** - `process.emitWarning`. +- `warning` {string|Error} The warning to emit. +- `type` {string} When `warning` is a `String`, `type` is the name to use for + the _type_ of warning being emitted. **Default:** `'Warning'`. +- `code` {string} A unique identifier for the warning instance being emitted. +- `ctor` {Function} When `warning` is a `String`, `ctor` is an optional function + used to limit the generated stack trace. **Default:** `process.emitWarning`. The `process.emitWarning()` method can be used to emit custom or application specific process warnings. 
These can be listened for by adding a handler to the @@ -897,18 +916,18 @@ specific process warnings. These can be listened for by adding a handler to the ```js // Emit a warning using a string. -process.emitWarning('Something happened!'); +process.emitWarning("Something happened!"); // Emits: (node: 56338) Warning: Something happened! ``` ```js // Emit a warning using a string and a type. -process.emitWarning('Something Happened!', 'CustomWarning'); +process.emitWarning("Something Happened!", "CustomWarning"); // Emits: (node:56338) CustomWarning: Something Happened! ``` ```js -process.emitWarning('Something happened!', 'CustomWarning', 'WARN001'); +process.emitWarning("Something happened!", "CustomWarning", "WARN001"); // Emits: (node:56338) [WARN001] CustomWarning: Something happened! ``` @@ -917,7 +936,7 @@ In each of the previous examples, an `Error` object is generated internally by handler. ```js -process.on('warning', (warning) => { +process.on("warning", warning => { console.warn(warning.name); console.warn(warning.message); console.warn(warning.code); @@ -926,15 +945,15 @@ process.on('warning', (warning) => { ``` If `warning` is passed as an `Error` object, it will be passed through to the -`'warning'` event handler unmodified (and the optional `type`, -`code` and `ctor` arguments will be ignored): +`'warning'` event handler unmodified (and the optional `type`, `code` and `ctor` +arguments will be ignored): ```js // Emit a warning using an Error object. -const myWarning = new Error('Something happened!'); +const myWarning = new Error("Something happened!"); // Use the Error name property to specify the type name -myWarning.name = 'CustomWarning'; -myWarning.code = 'WARN001'; +myWarning.name = "CustomWarning"; +myWarning.code = "WARN001"; process.emitWarning(myWarning); // Emits: (node:56338) [WARN001] CustomWarning: Something happened! 
@@ -943,30 +962,30 @@ process.emitWarning(myWarning); A `TypeError` is thrown if `warning` is anything other than a string or `Error` object. -While process warnings use `Error` objects, the process warning -mechanism is **not** a replacement for normal error handling mechanisms. +While process warnings use `Error` objects, the process warning mechanism is +**not** a replacement for normal error handling mechanisms. The following additional handling is implemented if the warning `type` is `'DeprecationWarning'`: -* If the `--throw-deprecation` command-line flag is used, the deprecation +- If the `--throw-deprecation` command-line flag is used, the deprecation warning is thrown as an exception rather than being emitted as an event. -* If the `--no-deprecation` command-line flag is used, the deprecation - warning is suppressed. -* If the `--trace-deprecation` command-line flag is used, the deprecation +- If the `--no-deprecation` command-line flag is used, the deprecation warning + is suppressed. +- If the `--trace-deprecation` command-line flag is used, the deprecation warning is printed to `stderr` along with the full stack trace. ### Avoiding duplicate warnings -As a best practice, warnings should be emitted only once per process. To do -so, it is recommended to place the `emitWarning()` behind a simple boolean -flag as illustrated in the example below: +As a best practice, warnings should be emitted only once per process. To do so, +it is recommended to place the `emitWarning()` behind a simple boolean flag as +illustrated in the example below: ```js function emitMyWarning() { if (!emitMyWarning.warned) { emitMyWarning.warned = true; - process.emitWarning('Only warn once!'); + process.emitWarning("Only warn once!"); } } emitMyWarning(); @@ -976,6 +995,7 @@ emitMyWarning(); ``` ## process.env + -* {Object} +- {Object} The `process.env` property returns an object containing the user environment. See environ(7). @@ -997,6 +1017,7 @@ See environ(7). 
An example of this object looks like: + ```js { TERM: 'xterm-256color', @@ -1013,9 +1034,8 @@ An example of this object looks like: ``` It is possible to modify this object, but such modifications will not be -reflected outside the Node.js process, or (unless explicitly requested) -to other [`Worker`][] threads. -In other words, the following example would not work: +reflected outside the Node.js process, or (unless explicitly requested) to other +[`Worker`][] threads. In other words, the following example would not work: ```console $ node -e 'process.env.foo = "bar"' && echo $foo @@ -1024,13 +1044,13 @@ $ node -e 'process.env.foo = "bar"' && echo $foo While the following will: ```js -process.env.foo = 'bar'; +process.env.foo = "bar"; console.log(process.env.foo); ``` -Assigning a property on `process.env` will implicitly convert the value -to a string. **This behavior is deprecated.** Future versions of Node.js may -throw an error when the value is not a string, number, or boolean. +Assigning a property on `process.env` will implicitly convert the value to a +string. **This behavior is deprecated.** Future versions of Node.js may throw an +error when the value is not a string, number, or boolean. ```js process.env.test = null; @@ -1058,19 +1078,20 @@ console.log(process.env.test); // => 1 ``` -Unless explicitly specified when creating a [`Worker`][] instance, -each [`Worker`][] thread has its own copy of `process.env`, based on its -parent thread’s `process.env`, or whatever was specified as the `env` option -to the [`Worker`][] constructor. Changes to `process.env` will not be visible -across [`Worker`][] threads, and only the main thread can make changes that -are visible to the operating system or to native add-ons. 
+Unless explicitly specified when creating a [`Worker`][] instance, each +[`Worker`][] thread has its own copy of `process.env`, based on its parent +thread’s `process.env`, or whatever was specified as the `env` option to the +[`Worker`][] constructor. Changes to `process.env` will not be visible across +[`Worker`][] threads, and only the main thread can make changes that are visible +to the operating system or to native add-ons. ## process.execArgv + -* {string[]} +- {string[]} The `process.execArgv` property returns the set of Node.js-specific command-line options passed when the Node.js process was launched. These options do not @@ -1086,38 +1107,43 @@ $ node --harmony script.js --version Results in `process.execArgv`: + ```js -['--harmony'] +["--harmony"]; ``` And `process.argv`: + ```js -['/usr/local/bin/node', 'script.js', '--version'] +["/usr/local/bin/node", "script.js", "--version"]; ``` ## process.execPath + -* {string} +- {string} The `process.execPath` property returns the absolute pathname of the executable that started the Node.js process. + ```js -'/usr/local/bin/node' +"/usr/local/bin/node"; ``` ## process.exit([code]) + -* `code` {integer} The exit code. **Default:** `0`. +- `code` {integer} The exit code. **Default:** `0`. The `process.exit()` method instructs Node.js to terminate the process synchronously with an exit status of `code`. If `code` is omitted, exit uses @@ -1139,11 +1165,11 @@ completed fully, including I/O operations to `process.stdout` and `process.stderr`. In most situations, it is not actually necessary to call `process.exit()` -explicitly. The Node.js process will exit on its own *if there is no additional -work pending* in the event loop. The `process.exitCode` property can be set to +explicitly. The Node.js process will exit on its own _if there is no additional +work pending_ in the event loop. The `process.exitCode` property can be set to tell the process which exit code to use when the process exits gracefully. 
-For instance, the following example illustrates a *misuse* of the +For instance, the following example illustrates a _misuse_ of the `process.exit()` method that could lead to data printed to stdout being truncated and lost: @@ -1156,11 +1182,11 @@ if (someConditionNotMet()) { ``` The reason this is problematic is because writes to `process.stdout` in Node.js -are sometimes *asynchronous* and may occur over multiple ticks of the Node.js +are sometimes _asynchronous_ and may occur over multiple ticks of the Node.js event loop. Calling `process.exit()`, however, forces the process to exit -*before* those additional writes to `stdout` can be performed. +_before_ those additional writes to `stdout` can be performed. -Rather than calling `process.exit()` directly, the code *should* set the +Rather than calling `process.exit()` directly, the code _should_ set the `process.exitCode` and allow the process to exit naturally by avoiding scheduling any additional work for the event loop: @@ -1174,33 +1200,34 @@ if (someConditionNotMet()) { ``` If it is necessary to terminate the Node.js process due to an error condition, -throwing an *uncaught* error and allowing the process to terminate accordingly +throwing an _uncaught_ error and allowing the process to terminate accordingly is safer than calling `process.exit()`. -In [`Worker`][] threads, this function stops the current thread rather -than the current process. +In [`Worker`][] threads, this function stops the current thread rather than the +current process. ## process.exitCode + -* {integer} +- {integer} -A number which will be the process exit code, when the process either -exits gracefully, or is exited via [`process.exit()`][] without specifying -a code. +A number which will be the process exit code, when the process either exits +gracefully, or is exited via [`process.exit()`][] without specifying a code. 
Specifying a code to [`process.exit(code)`][`process.exit()`] will override any previous setting of `process.exitCode`. ## process.getegid() + -The `process.getegid()` method returns the numerical effective group identity -of the Node.js process. (See getegid(2).) +The `process.getegid()` method returns the numerical effective group identity of +the Node.js process. (See getegid(2).) ```js if (process.getegid) { @@ -1212,11 +1239,12 @@ This function is only available on POSIX platforms (i.e. not Windows or Android). ## process.geteuid() + -* Returns: {Object} +- Returns: {Object} The `process.geteuid()` method returns the numerical effective user identity of the process. (See geteuid(2).) @@ -1231,11 +1259,12 @@ This function is only available on POSIX platforms (i.e. not Windows or Android). ## process.getgid() + -* Returns: {Object} +- Returns: {Object} The `process.getgid()` method returns the numerical group identity of the process. (See getgid(2).) @@ -1250,11 +1279,12 @@ This function is only available on POSIX platforms (i.e. not Windows or Android). ## process.getgroups() + -* Returns: {integer[]} +- Returns: {integer[]} The `process.getgroups()` method returns an array with the supplementary group IDs. POSIX leaves it unspecified if the effective group ID is included but @@ -1264,11 +1294,12 @@ This function is only available on POSIX platforms (i.e. not Windows or Android). ## process.getuid() + -* Returns: {integer} +- Returns: {integer} The `process.getuid()` method returns the numeric user identity of the process. (See getuid(2).) @@ -1283,39 +1314,41 @@ This function is only available on POSIX platforms (i.e. not Windows or Android). ## process.hasUncaughtExceptionCaptureCallback() + -* Returns: {boolean} +- Returns: {boolean} Indicates whether a callback has been set using [`process.setUncaughtExceptionCaptureCallback()`][]. 
## process.hrtime([time]) + -* `time` {integer[]} The result of a previous call to `process.hrtime()` -* Returns: {integer[]} +- `time` {integer[]} The result of a previous call to `process.hrtime()` +- Returns: {integer[]} -This is the legacy version of [`process.hrtime.bigint()`][] -before `bigint` was introduced in JavaScript. +This is the legacy version of [`process.hrtime.bigint()`][] before `bigint` was +introduced in JavaScript. -The `process.hrtime()` method returns the current high-resolution real time -in a `[seconds, nanoseconds]` tuple `Array`, where `nanoseconds` is the -remaining part of the real time that can't be represented in second precision. +The `process.hrtime()` method returns the current high-resolution real time in a +`[seconds, nanoseconds]` tuple `Array`, where `nanoseconds` is the remaining +part of the real time that can't be represented in second precision. `time` is an optional parameter that must be the result of a previous -`process.hrtime()` call to diff with the current time. If the parameter -passed in is not a tuple `Array`, a `TypeError` will be thrown. Passing in a +`process.hrtime()` call to diff with the current time. If the parameter passed +in is not a tuple `Array`, a `TypeError` will be thrown. Passing in a user-defined array instead of the result of a previous call to `process.hrtime()` will lead to undefined behavior. -These times are relative to an arbitrary time in the -past, and not related to the time of day and therefore not subject to clock -drift. The primary use is for measuring performance between intervals: +These times are relative to an arbitrary time in the past, and not related to +the time of day and therefore not subject to clock drift. 
The primary use is for +measuring performance between intervals: ```js const NS_PER_SEC = 1e9; @@ -1332,18 +1365,19 @@ setTimeout(() => { ``` ## process.hrtime.bigint() + -* Returns: {bigint} +- Returns: {bigint} -The `bigint` version of the [`process.hrtime()`][] method returning the -current high-resolution real time in nanoseconds as a `bigint`. +The `bigint` version of the [`process.hrtime()`][] method returning the current +high-resolution real time in nanoseconds as a `bigint`. -Unlike [`process.hrtime()`][], it does not support an additional `time` -argument since the difference can just be computed directly -by subtraction of the two `bigint`s. +Unlike [`process.hrtime()`][], it does not support an additional `time` argument +since the difference can just be computed directly by subtraction of the two +`bigint`s. ```js const start = process.hrtime.bigint(); @@ -1359,39 +1393,40 @@ setTimeout(() => { ``` ## process.initgroups(user, extraGroup) + -* `user` {string|number} The user name or numeric identifier. -* `extraGroup` {string|number} A group name or numeric identifier. +- `user` {string|number} The user name or numeric identifier. +- `extraGroup` {string|number} A group name or numeric identifier. The `process.initgroups()` method reads the `/etc/group` file and initializes -the group access list, using all groups of which the user is a member. This is -a privileged operation that requires that the Node.js process either have `root` +the group access list, using all groups of which the user is a member. This is a +privileged operation that requires that the Node.js process either have `root` access or the `CAP_SETGID` capability. 
Use care when dropping privileges: ```js -console.log(process.getgroups()); // [ 0 ] -process.initgroups('bnoordhuis', 1000); // switch user -console.log(process.getgroups()); // [ 27, 30, 46, 1000, 0 ] -process.setgid(1000); // drop root gid -console.log(process.getgroups()); // [ 27, 30, 46, 1000 ] +console.log(process.getgroups()); // [ 0 ] +process.initgroups("bnoordhuis", 1000); // switch user +console.log(process.getgroups()); // [ 27, 30, 46, 1000, 0 ] +process.setgid(1000); // drop root gid +console.log(process.getgroups()); // [ 27, 30, 46, 1000 ] ``` This function is only available on POSIX platforms (i.e. not Windows or -Android). -This feature is not available in [`Worker`][] threads. +Android). This feature is not available in [`Worker`][] threads. ## process.kill(pid[, signal]) + -* `pid` {number} A process ID -* `signal` {string|number} The signal to send, either as a string or number. +- `pid` {number} A process ID +- `signal` {string|number} The signal to send, either as a string or number. **Default:** `'SIGTERM'`. The `process.kill()` method sends the `signal` to the process identified by @@ -1410,38 +1445,40 @@ signal sender, like the `kill` system call. The signal sent may do something other than kill the target process. ```js -process.on('SIGHUP', () => { - console.log('Got SIGHUP signal.'); +process.on("SIGHUP", () => { + console.log("Got SIGHUP signal."); }); setTimeout(() => { - console.log('Exiting.'); + console.log("Exiting."); process.exit(0); }, 100); -process.kill(process.pid, 'SIGHUP'); +process.kill(process.pid, "SIGHUP"); ``` When `SIGUSR1` is received by a Node.js process, Node.js will start the debugger. See [Signal Events][]. ## process.mainModule + -* {Object} +- {Object} The `process.mainModule` property provides an alternative way of retrieving [`require.main`][]. 
The difference is that if the main module changes at runtime, [`require.main`][] may still refer to the original main module in -modules that were required before the change occurred. Generally, it's -safe to assume that the two refer to the same module. +modules that were required before the change occurred. Generally, it's safe to +assume that the two refer to the same module. -As with [`require.main`][], `process.mainModule` will be `undefined` if there -is no entry script. +As with [`require.main`][], `process.mainModule` will be `undefined` if there is +no entry script. ## process.memoryUsage() + -* Returns: {Object} - * `rss` {integer} - * `heapTotal` {integer} - * `heapUsed` {integer} - * `external` {integer} +- Returns: {Object} + - `rss` {integer} + - `heapTotal` {integer} + - `heapUsed` {integer} + - `external` {integer} The `process.memoryUsage()` method returns an object describing the memory usage of the Node.js process measured in bytes. @@ -1468,6 +1505,7 @@ console.log(process.memoryUsage()); Will generate: + ```js { rss: 4935680, @@ -1477,20 +1515,21 @@ Will generate: } ``` -`heapTotal` and `heapUsed` refer to V8's memory usage. -`external` refers to the memory usage of C++ objects bound to JavaScript -objects managed by V8. `rss`, Resident Set Size, is the amount of space -occupied in the main memory device (that is a subset of the total allocated -memory) for the process, which includes the _heap_, _code segment_ and _stack_. +`heapTotal` and `heapUsed` refer to V8's memory usage. `external` refers to the +memory usage of C++ objects bound to JavaScript objects managed by V8. `rss`, +Resident Set Size, is the amount of space occupied in the main memory device +(that is a subset of the total allocated memory) for the process, which includes +the _heap_, _code segment_ and _stack_. The _heap_ is where objects, strings, and closures are stored. Variables are -stored in the _stack_ and the actual JavaScript code resides in the -_code segment_. 
+stored in the _stack_ and the actual JavaScript code resides in the _code +segment_. When using [`Worker`][] threads, `rss` will be a value that is valid for the entire process, while the other fields will only refer to the current thread. ## process.nextTick(callback[, ...args]) + -* `callback` {Function} -* `...args` {any} Additional arguments to pass when invoking the `callback` +- `callback` {Function} +- `...args` {any} Additional arguments to pass when invoking the `callback` `process.nextTick()` adds `callback` to the "next tick queue". This queue is fully drained after the current operation on the JavaScript stack runs to @@ -1509,20 +1548,20 @@ create an infinite loop if one were to recursively call `process.nextTick()`. See the [Event Loop] guide for more background. ```js -console.log('start'); +console.log("start"); process.nextTick(() => { - console.log('nextTick callback'); + console.log("nextTick callback"); }); -console.log('scheduled'); +console.log("scheduled"); // Output: // start // scheduled // nextTick callback ``` -This is important when developing APIs in order to give users the opportunity -to assign event handlers *after* an object has been constructed but before any -I/O has occurred: +This is important when developing APIs in order to give users the opportunity to +assign event handlers _after_ an object has been constructed but before any I/O +has occurred: ```js function MyThing(options) { @@ -1550,7 +1589,7 @@ function maybeSync(arg, cb) { return; } - fs.stat('file', cb); + fs.stat("file", cb); } ``` @@ -1577,29 +1616,30 @@ function definitelyAsync(arg, cb) { return; } - fs.stat('file', cb); + fs.stat("file", cb); } ``` ## process.noDeprecation + -* {boolean} +- {boolean} The `process.noDeprecation` property indicates whether the `--no-deprecation` -flag is set on the current Node.js process. 
See the documentation for -the [`'warning'` event][process_warning] and the -[`emitWarning()` method][process_emit_warning] for more information about this -flag's behavior. +flag is set on the current Node.js process. See the documentation for the +[`'warning'` event][process_warning] and the [`emitWarning()` +method][process_emit_warning] for more information about this flag's behavior. ## process.pid + -* {integer} +- {integer} The `process.pid` property returns the PID of the process. @@ -1608,39 +1648,41 @@ console.log(`This process is pid ${process.pid}`); ``` ## process.platform + -* {string} +- {string} The `process.platform` property returns a string identifying the operating system platform on which the Node.js process is running. Currently possible values are: -* `'aix'` -* `'darwin'` -* `'freebsd'` -* `'linux'` -* `'openbsd'` -* `'sunos'` -* `'win32'` +- `'aix'` +- `'darwin'` +- `'freebsd'` +- `'linux'` +- `'openbsd'` +- `'sunos'` +- `'win32'` ```js console.log(`This platform is ${process.platform}`); ``` The value `'android'` may also be returned if the Node.js is built on the -Android operating system. However, Android support in Node.js -[is experimental][Android building]. +Android operating system. However, Android support in Node.js [is +experimental][android building]. ## process.ppid + -* {integer} +- {integer} The `process.ppid` property returns the PID of the current parent process. @@ -1649,6 +1691,7 @@ console.log(`The parent process is pid ${process.ppid}`); ``` ## process.release + -* {Object} +- {Object} The `process.release` property returns an `Object` containing metadata related to the current release, including URLs for the source tarball and headers-only @@ -1665,26 +1708,27 @@ tarball. `process.release` contains the following properties: -* `name` {string} A value that will always be `'node'` for Node.js. For - legacy io.js releases, this will be `'io.js'`. 
-* `sourceUrl` {string} an absolute URL pointing to a _`.tar.gz`_ file containing
+- `name` {string} A value that will always be `'node'` for Node.js. For legacy
+  io.js releases, this will be `'io.js'`.
+- `sourceUrl` {string} an absolute URL pointing to a _`.tar.gz`_ file containing
  the source code of the current release.
-* `headersUrl`{string} an absolute URL pointing to a _`.tar.gz`_ file containing
+- `headersUrl` {string} an absolute URL pointing to a _`.tar.gz`_ file containing
  only the source header files for the current release. This file is
  significantly smaller than the full source file and can be used for compiling
  Node.js native add-ons.
-* `libUrl` {string} an absolute URL pointing to a _`node.lib`_ file matching the
+- `libUrl` {string} an absolute URL pointing to a _`node.lib`_ file matching the
  architecture and version of the current release. This file is used for
  compiling Node.js native add-ons. _This property is only present on Windows
  builds of Node.js and will be missing on all other platforms._
-* `lts` {string} a string label identifying the [LTS][] label for this release.
+- `lts` {string} a string label identifying the [LTS][] label for this release.
  This property only exists for LTS releases and is `undefined` for all other
  release types, including _Current_ releases. Currently the valid values are:
- * `'Argon'` for the 4.x LTS line beginning with 4.2.0.
- * `'Boron'` for the 6.x LTS line beginning with 6.9.0.
- * `'Carbon'` for the 8.x LTS line beginning with 8.9.1.
+ - `'Argon'` for the 4.x LTS line beginning with 4.2.0.
+ - `'Boron'` for the 6.x LTS line beginning with 6.9.0.
+ - `'Carbon'` for the 8.x LTS line beginning with 8.9.1.
+
```js
{
  name: 'node',
@@ -1695,31 +1739,33 @@ tarball.
}
```

-In custom builds from non-release versions of the source tree, only the
-`name` property may be present. The additional properties should not be
-relied upon to exist.
+In custom builds from non-release versions of the source tree, only the `name` +property may be present. The additional properties should not be relied upon to +exist. ## process.report + > Stability: 1 - Experimental -* {Object} +- {Object} `process.report` is an object whose methods are used to generate diagnostic reports for the current process. Additional documentation is available in the [report documentation][]. ### process.report.directory + > Stability: 1 - Experimental -* {string} +- {string} Directory where the report is written. The default value is the empty string, indicating that reports are written to the current working directory of the @@ -1730,13 +1776,14 @@ console.log(`Report directory is ${process.report.directory}`); ``` ### process.report.filename + > Stability: 1 - Experimental -* {string} +- {string} Filename where the report is written. If set to the empty string, the output filename will be comprised of a timestamp, PID, and sequence number. The default @@ -1747,14 +1794,15 @@ console.log(`Report filename is ${process.report.filename}`); ``` ### process.report.getReport([err]) + > Stability: 1 - Experimental -* `err` {Error} A custom error used for reporting the JavaScript stack. -* Returns: {Object} +- `err` {Error} A custom error used for reporting the JavaScript stack. +- Returns: {Object} Returns a JavaScript Object representation of a diagnostic report for the running process. The report's JavaScript stack trace is taken from `err`, if @@ -1765,20 +1813,21 @@ const data = process.report.getReport(); console.log(data.header.nodeJsVersion); // Similar to process.report.writeReport() -const fs = require('fs'); -fs.writeFileSync(util.inspect(data), 'my-report.log', 'utf8'); +const fs = require("fs"); +fs.writeFileSync(util.inspect(data), "my-report.log", "utf8"); ``` Additional documentation is available in the [report documentation][]. 
### process.report.reportOnFatalError + > Stability: 1 - Experimental -* {boolean} +- {boolean} If `true`, a diagnostic report is generated on fatal errors, such as out of memory errors or failed C++ assertions. @@ -1788,29 +1837,31 @@ console.log(`Report on fatal error: ${process.report.reportOnFatalError}`); ``` ### process.report.reportOnSignal + > Stability: 1 - Experimental -* {boolean} +- {boolean} -If `true`, a diagnostic report is generated when the process receives the -signal specified by `process.report.signal`. +If `true`, a diagnostic report is generated when the process receives the signal +specified by `process.report.signal`. ```js console.log(`Report on signal: ${process.report.reportOnSignal}`); ``` ### process.report.reportOnUncaughtException + > Stability: 1 - Experimental -* {boolean} +- {boolean} If `true`, a diagnostic report is generated on uncaught exception. @@ -1819,13 +1870,14 @@ console.log(`Report on exception: ${process.report.reportOnUncaughtException}`); ``` ### process.report.signal + > Stability: 1 - Experimental -* {string} +- {string} The signal used to trigger the creation of a diagnostic report. Defaults to `'SIGUSR2'`. @@ -1835,19 +1887,20 @@ console.log(`Report signal: ${process.report.signal}`); ``` ### process.report.writeReport([filename][, err]) + > Stability: 1 - Experimental -* `filename` {string} Name of the file where the report is written. This - should be a relative path, that will be appended to the directory specified in +- `filename` {string} Name of the file where the report is written. This should + be a relative path, that will be appended to the directory specified in `process.report.directory`, or the current working directory of the Node.js process, if unspecified. -* `err` {Error} A custom error used for reporting the JavaScript stack. +- `err` {Error} A custom error used for reporting the JavaScript stack. -* Returns: {string} Returns the filename of the generated report. 
+- Returns: {string} Returns the filename of the generated report. Writes a diagnostic report to a file. If `filename` is not provided, the default filename includes the date, time, PID, and a sequence number. The report's @@ -1860,52 +1913,52 @@ process.report.writeReport(); Additional documentation is available in the [report documentation][]. ## process.resourceUsage() + -* Returns: {Object} the resource usage for the current process. All of these - values come from the `uv_getrusage` call which returns - a [`uv_rusage_t` struct][uv_rusage_t]. - * `userCPUTime` {integer} maps to `ru_utime` computed in microseconds. - It is the same value as [`process.cpuUsage().user`][process.cpuUsage]. - * `systemCPUTime` {integer} maps to `ru_stime` computed in microseconds. - It is the same value as [`process.cpuUsage().system`][process.cpuUsage]. - * `maxRSS` {integer} maps to `ru_maxrss` which is the maximum resident set +- Returns: {Object} the resource usage for the current process. All of these + values come from the `uv_getrusage` call which returns a [`uv_rusage_t` + struct][uv_rusage_t]. + - `userCPUTime` {integer} maps to `ru_utime` computed in microseconds. It is + the same value as [`process.cpuUsage().user`][process.cpuusage]. + - `systemCPUTime` {integer} maps to `ru_stime` computed in microseconds. It is + the same value as [`process.cpuUsage().system`][process.cpuusage]. + - `maxRSS` {integer} maps to `ru_maxrss` which is the maximum resident set size used in kilobytes. - * `sharedMemorySize` {integer} maps to `ru_ixrss` but is not supported by - any platform. - * `unsharedDataSize` {integer} maps to `ru_idrss` but is not supported by - any platform. - * `unsharedStackSize` {integer} maps to `ru_isrss` but is not supported by - any platform. - * `minorPageFault` {integer} maps to `ru_minflt` which is the number of - minor page faults for the process, see - [this article for more details][wikipedia_minor_fault]. 
- * `majorPageFault` {integer} maps to `ru_majflt` which is the number of - major page faults for the process, see - [this article for more details][wikipedia_major_fault]. This field is not - supported on Windows. - * `swappedOut` {integer} maps to `ru_nswap` but is not supported by any + - `sharedMemorySize` {integer} maps to `ru_ixrss` but is not supported by any + platform. + - `unsharedDataSize` {integer} maps to `ru_idrss` but is not supported by any + platform. + - `unsharedStackSize` {integer} maps to `ru_isrss` but is not supported by any + platform. + - `minorPageFault` {integer} maps to `ru_minflt` which is the number of minor + page faults for the process, see [this article for more + details][wikipedia_minor_fault]. + - `majorPageFault` {integer} maps to `ru_majflt` which is the number of major + page faults for the process, see [this article for more + details][wikipedia_major_fault]. This field is not supported on Windows. + - `swappedOut` {integer} maps to `ru_nswap` but is not supported by any platform. - * `fsRead` {integer} maps to `ru_inblock` which is the number of times the + - `fsRead` {integer} maps to `ru_inblock` which is the number of times the file system had to perform input. - * `fsWrite` {integer} maps to `ru_oublock` which is the number of times the + - `fsWrite` {integer} maps to `ru_oublock` which is the number of times the file system had to perform output. - * `ipcSent` {integer} maps to `ru_msgsnd` but is not supported by any + - `ipcSent` {integer} maps to `ru_msgsnd` but is not supported by any platform. - * `ipcReceived` {integer} maps to `ru_msgrcv` but is not supported by any + - `ipcReceived` {integer} maps to `ru_msgrcv` but is not supported by any platform. - * `signalsCount` {integer} maps to `ru_nsignals` but is not supported by any + - `signalsCount` {integer} maps to `ru_nsignals` but is not supported by any platform. 
- * `voluntaryContextSwitches` {integer} maps to `ru_nvcsw` which is the - number of times a CPU context switch resulted due to a process voluntarily - giving up the processor before its time slice was completed (usually to - await availability of a resource). This field is not supported on Windows. - * `involuntaryContextSwitches` {integer} maps to `ru_nivcsw` which is the + - `voluntaryContextSwitches` {integer} maps to `ru_nvcsw` which is the number + of times a CPU context switch resulted due to a process voluntarily giving + up the processor before its time slice was completed (usually to await + availability of a resource). This field is not supported on Windows. + - `involuntaryContextSwitches` {integer} maps to `ru_nivcsw` which is the number of times a CPU context switch resulted due to a higher priority - process becoming runnable or because the current process exceeded its - time slice. This field is not supported on Windows. + process becoming runnable or because the current process exceeded its time + slice. This field is not supported on Windows. ```js console.log(process.resourceUsage()); @@ -1933,15 +1986,16 @@ console.log(process.resourceUsage()); ``` ## process.send(message[, sendHandle[, options]][, callback]) + -* `message` {Object} -* `sendHandle` {net.Server|net.Socket} -* `options` {Object} -* `callback` {Function} -* Returns: {boolean} +- `message` {Object} +- `sendHandle` {net.Server|net.Socket} +- `options` {Object} +- `callback` {Function} +- Returns: {boolean} If Node.js is spawned with an IPC channel, the `process.send()` method can be used to send messages to the parent process. Messages will be received as a @@ -1954,16 +2008,17 @@ The message goes through serialization and parsing. The resulting message might not be the same as what is originally sent. 
## process.setegid(id)
+

-* `id` {string|number} A group name or ID
+- `id` {string|number} A group name or ID

The `process.setegid()` method sets the effective group identity of the process.
-(See setegid(2).) The `id` can be passed as either a numeric ID or a group
-name string. If a group name is specified, this method blocks while resolving
-the associated a numeric ID.
+(See setegid(2).) The `id` can be passed as either a numeric ID or a group name
+string. If a group name is specified, this method blocks while resolving the
+associated numeric ID.

```js
if (process.getegid && process.setegid) {
@@ -1978,15 +2033,15 @@ if (process.getegid && process.setegid) {
```

This function is only available on POSIX platforms (i.e. not Windows or
-Android).
-This feature is not available in [`Worker`][] threads.
+Android). This feature is not available in [`Worker`][] threads.

## process.seteuid(id)
+

-* `id` {string|number} A user name or ID
+- `id` {string|number} A user name or ID

The `process.seteuid()` method sets the effective user identity of the process.
(See seteuid(2).) The `id` can be passed as either a numeric ID or a username
@@ -2006,15 +2061,15 @@ if (process.geteuid && process.seteuid) {
```

This function is only available on POSIX platforms (i.e. not Windows or
-Android).
-This feature is not available in [`Worker`][] threads.
+Android). This feature is not available in [`Worker`][] threads.

## process.setgid(id)
+

-* `id` {string|number} The group name or ID
+- `id` {string|number} The group name or ID

The `process.setgid()` method sets the group identity of the process. (See
setgid(2).) The `id` can be passed as either a numeric ID or a group name
@@ -2034,15 +2089,15 @@ if (process.getgid && process.setgid) {
```

This function is only available on POSIX platforms (i.e. not Windows or
-Android).
-This feature is not available in [`Worker`][] threads.
+Android). This feature is not available in [`Worker`][] threads.
## process.setgroups(groups) + -* `groups` {integer[]} +- `groups` {integer[]} The `process.setgroups()` method sets the supplementary group IDs for the Node.js process. This is a privileged operation that requires the Node.js @@ -2051,15 +2106,15 @@ process to have `root` or the `CAP_SETGID` capability. The `groups` array can contain numeric group IDs, group names or both. This function is only available on POSIX platforms (i.e. not Windows or -Android). -This feature is not available in [`Worker`][] threads. +Android). This feature is not available in [`Worker`][] threads. ## process.setuid(id) + -* `id` {integer | string} +- `id` {integer | string} The `process.setuid(id)` method sets the user identity of the process. (See setuid(2).) The `id` can be passed as either a numeric ID or a username string. @@ -2079,58 +2134,55 @@ if (process.getuid && process.setuid) { ``` This function is only available on POSIX platforms (i.e. not Windows or -Android). -This feature is not available in [`Worker`][] threads. +Android). This feature is not available in [`Worker`][] threads. ## process.setUncaughtExceptionCaptureCallback(fn) + -* `fn` {Function|null} +- `fn` {Function|null} The `process.setUncaughtExceptionCaptureCallback()` function sets a function that will be invoked when an uncaught exception occurs, which will receive the exception value itself as its first argument. -If such a function is set, the [`'uncaughtException'`][] event will -not be emitted. If `--abort-on-uncaught-exception` was passed from the -command line or set through [`v8.setFlagsFromString()`][], the process will -not abort. +If such a function is set, the [`'uncaughtException'`][] event will not be +emitted. If `--abort-on-uncaught-exception` was passed from the command line or +set through [`v8.setFlagsFromString()`][], the process will not abort. To unset the capture function, `process.setUncaughtExceptionCaptureCallback(null)` may be used. 
Calling this method with a non-`null` argument while another capture function is set will throw an error. -Using this function is mutually exclusive with using the deprecated -[`domain`][] built-in module. +Using this function is mutually exclusive with using the deprecated [`domain`][] +built-in module. ## process.stderr -* {Stream} +- {Stream} -The `process.stderr` property returns a stream connected to -`stderr` (fd `2`). It is a [`net.Socket`][] (which is a [Duplex][] -stream) unless fd `2` refers to a file, in which case it is -a [Writable][] stream. +The `process.stderr` property returns a stream connected to `stderr` (fd `2`). +It is a [`net.Socket`][] (which is a [Duplex][] stream) unless fd `2` refers to +a file, in which case it is a [Writable][] stream. -`process.stderr` differs from other Node.js streams in important ways. See -[note on process I/O][] for more information. +`process.stderr` differs from other Node.js streams in important ways. See [note +on process I/O][] for more information. ## process.stdin -* {Stream} +- {Stream} -The `process.stdin` property returns a stream connected to -`stdin` (fd `0`). It is a [`net.Socket`][] (which is a [Duplex][] -stream) unless fd `0` refers to a file, in which case it is -a [Readable][] stream. +The `process.stdin` property returns a stream connected to `stdin` (fd `0`). It +is a [`net.Socket`][] (which is a [Duplex][] stream) unless fd `0` refers to a +file, in which case it is a [Readable][] stream. ```js -process.stdin.setEncoding('utf8'); +process.stdin.setEncoding("utf8"); -process.stdin.on('readable', () => { +process.stdin.on("readable", () => { let chunk; // Use a loop to make sure we read all available data. 
while ((chunk = process.stdin.read()) !== null) { @@ -2138,27 +2190,26 @@ process.stdin.on('readable', () => { } }); -process.stdin.on('end', () => { - process.stdout.write('end'); +process.stdin.on("end", () => { + process.stdout.write("end"); }); ``` -As a [Duplex][] stream, `process.stdin` can also be used in "old" mode that -is compatible with scripts written for Node.js prior to v0.10. -For more information see [Stream compatibility][]. +As a [Duplex][] stream, `process.stdin` can also be used in "old" mode that is +compatible with scripts written for Node.js prior to v0.10. For more information +see [Stream compatibility][]. -In "old" streams mode the `stdin` stream is paused by default, so one -must call `process.stdin.resume()` to read from it. Note also that calling +In "old" streams mode the `stdin` stream is paused by default, so one must call +`process.stdin.resume()` to read from it. Note also that calling `process.stdin.resume()` itself would switch stream to "old" mode. ## process.stdout -* {Stream} +- {Stream} -The `process.stdout` property returns a stream connected to -`stdout` (fd `1`). It is a [`net.Socket`][] (which is a [Duplex][] -stream) unless fd `1` refers to a file, in which case it is -a [Writable][] stream. +The `process.stdout` property returns a stream connected to `stdout` (fd `1`). +It is a [`net.Socket`][] (which is a [Duplex][] stream) unless fd `1` refers to +a file, in which case it is a [Writable][] stream. For example, to copy `process.stdin` to `process.stdout`: @@ -2166,8 +2217,8 @@ For example, to copy `process.stdin` to `process.stdout`: process.stdin.pipe(process.stdout); ``` -`process.stdout` differs from other Node.js streams in important ways. See -[note on process I/O][] for more information. +`process.stdout` differs from other Node.js streams in important ways. See [note +on process I/O][] for more information. ### A note on process I/O @@ -2176,21 +2227,21 @@ important ways: 1. 
They are used internally by [`console.log()`][] and [`console.error()`][], respectively. -2. Writes may be synchronous depending on what the stream is connected to - and whether the system is Windows or POSIX: - * Files: *synchronous* on Windows and POSIX - * TTYs (Terminals): *asynchronous* on Windows, *synchronous* on POSIX - * Pipes (and sockets): *synchronous* on Windows, *asynchronous* on POSIX +2. Writes may be synchronous depending on what the stream is connected to and + whether the system is Windows or POSIX: + - Files: _synchronous_ on Windows and POSIX + - TTYs (Terminals): _asynchronous_ on Windows, _synchronous_ on POSIX + - Pipes (and sockets): _synchronous_ on Windows, _asynchronous_ on POSIX -These behaviors are partly for historical reasons, as changing them would -create backwards incompatibility, but they are also expected by some users. +These behaviors are partly for historical reasons, as changing them would create +backwards incompatibility, but they are also expected by some users. Synchronous writes avoid problems such as output written with `console.log()` or `console.error()` being unexpectedly interleaved, or not written at all if `process.exit()` is called before an asynchronous write completes. See [`process.exit()`][] for more information. -***Warning***: Synchronous writes block the event loop until the write has +**_Warning_**: Synchronous writes block the event loop until the write has completed. This can be near instantaneous in the case of output to a file, but under high system load, pipes that are not being read at the receiving end, or with slow terminals or file systems, its possible for the event loop to be @@ -2218,11 +2269,12 @@ false See the [TTY][] documentation for more information. ## process.throwDeprecation + -* {boolean} +- {boolean} The `process.throwDeprecation` property indicates whether the `--throw-deprecation` flag is set on the current Node.js process. 
See the @@ -2231,15 +2283,16 @@ documentation for the [`'warning'` event][process_warning] and the flag's behavior. ## process.title + -* {string} +- {string} -The `process.title` property returns the current process title (i.e. returns -the current value of `ps`). Assigning a new value to `process.title` modifies -the current value of `ps`. +The `process.title` property returns the current process title (i.e. returns the +current value of `ps`). Assigning a new value to `process.title` modifies the +current value of `ps`. When a new value is assigned, different platforms will impose different maximum length restrictions on the title. Usually such restrictions are quite limited. @@ -2251,11 +2304,12 @@ memory but that was potentially insecure and confusing in some (rather obscure) cases. ## process.traceDeprecation + -* {boolean} +- {boolean} The `process.traceDeprecation` property indicates whether the `--trace-deprecation` flag is set on the current Node.js process. See the @@ -2264,11 +2318,12 @@ documentation for the [`'warning'` event][process_warning] and the flag's behavior. ## process.umask([mask]) + -* `mask` {number} +- `mask` {number} The `process.umask()` method sets or returns the Node.js process's file mode creation mask. Child processes inherit the mask from the parent process. Invoked @@ -2287,11 +2342,12 @@ console.log( umask will result in a thrown exception. ## process.uptime() + -* Returns: {number} +- Returns: {number} The `process.uptime()` method returns the number of seconds the current Node.js process has been running. @@ -2300,11 +2356,12 @@ The return value includes fractions of a second. Use `Math.floor()` to get whole seconds. ## process.version + -* {string} +- {string} The `process.version` property returns the Node.js version string. 
@@ -2313,6 +2370,7 @@ console.log(`Version: ${process.version}`); ``` ## process.versions + -* {Object} +- {Object} The `process.versions` property returns an object listing the version strings of Node.js and its dependencies. `process.versions.modules` indicates the current @@ -2357,97 +2415,99 @@ Will generate an object similar to: ## Exit Codes -Node.js will normally exit with a `0` status code when no more async -operations are pending. The following status codes are used in other -cases: - -* `1` **Uncaught Fatal Exception** - There was an uncaught exception, - and it was not handled by a domain or an [`'uncaughtException'`][] event - handler. -* `2` - Unused (reserved by Bash for builtin misuse) -* `3` **Internal JavaScript Parse Error** - The JavaScript source code - internal in Node.js's bootstrapping process caused a parse error. This - is extremely rare, and generally can only happen during development - of Node.js itself. -* `4` **Internal JavaScript Evaluation Failure** - The JavaScript - source code internal in Node.js's bootstrapping process failed to - return a function value when evaluated. This is extremely rare, and - generally can only happen during development of Node.js itself. -* `5` **Fatal Error** - There was a fatal unrecoverable error in V8. - Typically a message will be printed to stderr with the prefix `FATAL - ERROR`. -* `6` **Non-function Internal Exception Handler** - There was an - uncaught exception, but the internal fatal exception handler - function was somehow set to a non-function, and could not be called. -* `7` **Internal Exception Handler Run-Time Failure** - There was an - uncaught exception, and the internal fatal exception handler - function itself threw an error while attempting to handle it. This - can happen, for example, if an [`'uncaughtException'`][] or - `domain.on('error')` handler throws an error. -* `8` - Unused. In previous versions of Node.js, exit code 8 sometimes - indicated an uncaught exception. 
-* `9` - **Invalid Argument** - Either an unknown option was specified, - or an option requiring a value was provided without a value. -* `10` **Internal JavaScript Run-Time Failure** - The JavaScript - source code internal in Node.js's bootstrapping process threw an error - when the bootstrapping function was called. This is extremely rare, - and generally can only happen during development of Node.js itself. -* `12` **Invalid Debug Argument** - The `--inspect` and/or `--inspect-brk` +Node.js will normally exit with a `0` status code when no more async operations +are pending. The following status codes are used in other cases: + +- `1` **Uncaught Fatal Exception** - There was an uncaught exception, and it was + not handled by a domain or an [`'uncaughtException'`][] event handler. +- `2` - Unused (reserved by Bash for builtin misuse) +- `3` **Internal JavaScript Parse Error** - The JavaScript source code internal + in Node.js's bootstrapping process caused a parse error. This is extremely + rare, and generally can only happen during development of Node.js itself. +- `4` **Internal JavaScript Evaluation Failure** - The JavaScript source code + internal in Node.js's bootstrapping process failed to return a function value + when evaluated. This is extremely rare, and generally can only happen during + development of Node.js itself. +- `5` **Fatal Error** - There was a fatal unrecoverable error in V8. Typically a + message will be printed to stderr with the prefix `FATAL ERROR`. +- `6` **Non-function Internal Exception Handler** - There was an uncaught + exception, but the internal fatal exception handler function was somehow set + to a non-function, and could not be called. +- `7` **Internal Exception Handler Run-Time Failure** - There was an uncaught + exception, and the internal fatal exception handler function itself threw an + error while attempting to handle it. 
This can happen, for example, if an + [`'uncaughtException'`][] or `domain.on('error')` handler throws an error. +- `8` - Unused. In previous versions of Node.js, exit code 8 sometimes indicated + an uncaught exception. +- `9` - **Invalid Argument** - Either an unknown option was specified, or an + option requiring a value was provided without a value. +- `10` **Internal JavaScript Run-Time Failure** - The JavaScript source code + internal in Node.js's bootstrapping process threw an error when the + bootstrapping function was called. This is extremely rare, and generally can + only happen during development of Node.js itself. +- `12` **Invalid Debug Argument** - The `--inspect` and/or `--inspect-brk` options were set, but the port number chosen was invalid or unavailable. -* `>128` **Signal Exits** - If Node.js receives a fatal signal such as - `SIGKILL` or `SIGHUP`, then its exit code will be `128` plus the - value of the signal code. This is a standard POSIX practice, since - exit codes are defined to be 7-bit integers, and signal exits set - the high-order bit, and then contain the value of the signal code. - For example, signal `SIGABRT` has value `6`, so the expected exit - code will be `128` + `6`, or `134`. +- `>128` **Signal Exits** - If Node.js receives a fatal signal such as `SIGKILL` + or `SIGHUP`, then its exit code will be `128` plus the value of the signal + code. This is a standard POSIX practice, since exit codes are defined to be + 7-bit integers, and signal exits set the high-order bit, and then contain the + value of the signal code. For example, signal `SIGABRT` has value `6`, so the + expected exit code will be `128` + `6`, or `134`. 
[`'exit'`]: #process_event_exit [`'message'`]: child_process.html#child_process_event_message -[`'uncaughtException'`]: #process_event_uncaughtexception -[`ChildProcess.disconnect()`]: child_process.html#child_process_subprocess_disconnect -[`ChildProcess.send()`]: child_process.html#child_process_subprocess_send_message_sendhandle_options_callback -[`ChildProcess`]: child_process.html#child_process_class_childprocess -[`Error`]: errors.html#errors_class_error -[`EventEmitter`]: events.html#events_class_eventemitter -[`NODE_OPTIONS`]: cli.html#cli_node_options_options -[`Worker`]: worker_threads.html#worker_threads_class_worker +[`'uncaughtexception'`]: #process_event_uncaughtexception +[`childprocess.disconnect()`]: + child_process.html#child_process_subprocess_disconnect +[`childprocess.send()`]: + child_process.html#child_process_subprocess_send_message_sendhandle_options_callback +[`childprocess`]: child_process.html#child_process_class_childprocess +[`error`]: errors.html#errors_class_error +[`eventemitter`]: events.html#events_class_eventemitter +[`node_options`]: cli.html#cli_node_options_options +[`worker`]: worker_threads.html#worker_threads_class_worker [`console.error()`]: console.html#console_console_error_data_args [`console.log()`]: console.html#console_console_log_data_args [`domain`]: domain.html -[`net.Server`]: net.html#net_class_net_server -[`net.Socket`]: net.html#net_class_net_socket +[`net.server`]: net.html#net_class_net_server +[`net.socket`]: net.html#net_class_net_socket [`os.constants.dlopen`]: os.html#os_dlopen_constants [`process.argv`]: #process_process_argv [`process.config`]: #process_process_config -[`process.execPath`]: #process_process_execpath +[`process.execpath`]: #process_process_execpath [`process.exit()`]: #process_process_exit_code -[`process.exitCode`]: #process_process_exitcode +[`process.exitcode`]: #process_process_exitcode [`process.hrtime()`]: #process_process_hrtime_time [`process.hrtime.bigint()`]: 
#process_process_hrtime_bigint [`process.kill()`]: #process_process_kill_pid_signal -[`process.setUncaughtExceptionCaptureCallback()`]: process.html#process_process_setuncaughtexceptioncapturecallback_fn -[`promise.catch()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/catch -[`Promise.race()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race +[`process.setuncaughtexceptioncapturecallback()`]: + process.html#process_process_setuncaughtexceptioncapturecallback_fn +[`promise.catch()`]: + https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/catch +[`promise.race()`]: + https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race [`require()`]: globals.html#globals_require [`require.main`]: modules.html#modules_accessing_the_main_module [`require.resolve()`]: modules.html#modules_require_resolve_request_options [`subprocess.kill()`]: child_process.html#child_process_subprocess_kill_signal -[`v8.setFlagsFromString()`]: v8.html#v8_v8_setflagsfromstring_flags -[Android building]: https://github.com/nodejs/node/blob/master/BUILDING.md#androidandroid-based-devices-eg-firefox-os -[Child Process]: child_process.html -[Cluster]: cluster.html -[Duplex]: stream.html#stream_duplex_and_transform_streams -[Event Loop]: https://nodejs.org/en/docs/guides/event-loop-timers-and-nexttick/#process-nexttick -[LTS]: https://github.com/nodejs/Release -[Readable]: stream.html#stream_readable_streams -[Signal Events]: #process_signal_events -[Stream compatibility]: stream.html#stream_compatibility_with_older_node_js_versions -[TTY]: tty.html#tty_tty -[Writable]: stream.html#stream_writable_streams +[`v8.setflagsfromstring()`]: v8.html#v8_v8_setflagsfromstring_flags +[android building]: + https://github.com/nodejs/node/blob/master/BUILDING.md#androidandroid-based-devices-eg-firefox-os +[child process]: child_process.html +[cluster]: 
cluster.html +[duplex]: stream.html#stream_duplex_and_transform_streams +[event loop]: + https://nodejs.org/en/docs/guides/event-loop-timers-and-nexttick/#process-nexttick +[lts]: https://github.com/nodejs/Release +[readable]: stream.html#stream_readable_streams +[signal events]: #process_signal_events +[stream compatibility]: + stream.html#stream_compatibility_with_older_node_js_versions +[tty]: tty.html#tty_tty +[writable]: stream.html#stream_writable_streams [debugger]: debugger.html -[note on process I/O]: process.html#process_a_note_on_process_i_o -[process.cpuUsage]: #process_process_cpuusage_previousvalue +[note on process i/o]: process.html#process_a_note_on_process_i_o +[process.cpuusage]: #process_process_cpuusage_previousvalue [process_emit_warning]: #process_process_emitwarning_warning_type_code_ctor [process_warning]: #process_event_warning [report documentation]: report.html diff --git a/doc/api/report.md b/doc/api/report.md index d08de8636a3bb8..880320d6dec438 100644 --- a/doc/api/report.md +++ b/doc/api/report.md @@ -9,15 +9,15 @@ Delivers a JSON-formatted diagnostic summary, written to a file. -The report is intended for development, test and production use, to capture -and preserve information for problem determination. It includes JavaScript -and native stack traces, heap statistics, platform information, resource -usage etc. With the report option enabled, diagnostic reports can be triggered -on unhandled exceptions, fatal errors and user signals, in addition to -triggering programmatically through API calls. +The report is intended for development, test and production use, to capture and +preserve information for problem determination. It includes JavaScript and +native stack traces, heap statistics, platform information, resource usage etc. +With the report option enabled, diagnostic reports can be triggered on unhandled +exceptions, fatal errors and user signals, in addition to triggering +programmatically through API calls. 
-A complete example report that was generated on an uncaught exception -is provided below for reference. +A complete example report that was generated on an uncaught exception is +provided below for reference. ```json { @@ -191,7 +191,7 @@ is provided below for reference. "resourceUsage": { "userCpuSeconds": 0.069595, "kernelCpuSeconds": 0.019163, - "cpuConsumptionPercent": 0.000000, + "cpuConsumptionPercent": 0.0, "maxRss": 18079744, "pageFaults": { "IORequired": 0, @@ -205,7 +205,7 @@ is provided below for reference. "uvthreadResourceUsage": { "userCpuSeconds": 0.068457, "kernelCpuSeconds": 0.019127, - "cpuConsumptionPercent": 0.000000, + "cpuConsumptionPercent": 0.0, "fsActivity": { "reads": 0, "writes": 0 @@ -395,42 +395,39 @@ node --experimental-report --report-uncaught-exception \ --report-on-signal --report-on-fatalerror app.js ``` -* `--experimental-report` Enables the diagnostic report feature. - In the absence of this flag, use of all other related options will result in - an error. +- `--experimental-report` Enables the diagnostic report feature. In the absence + of this flag, use of all other related options will result in an error. -* `--report-uncaught-exception` Enables report to be generated on -un-caught exceptions. Useful when inspecting JavaScript stack in conjunction -with native stack and other runtime environment data. +- `--report-uncaught-exception` Enables report to be generated on un-caught + exceptions. Useful when inspecting JavaScript stack in conjunction with native + stack and other runtime environment data. -* `--report-on-signal` Enables report to be generated upon receiving -the specified (or predefined) signal to the running Node.js process. (See below -on how to modify the signal that triggers the report.) Default signal is `SIGUSR2`. -Useful when a report needs to be triggered from another program. 
-Application monitors may leverage this feature to collect report at regular -intervals and plot rich set of internal runtime data to their views. +- `--report-on-signal` Enables report to be generated upon receiving the + specified (or predefined) signal to the running Node.js process. (See below on + how to modify the signal that triggers the report.) Default signal is + `SIGUSR2`. Useful when a report needs to be triggered from another program. + Application monitors may leverage this feature to collect report at regular + intervals and plot rich set of internal runtime data to their views. Signal based report generation is not supported in Windows. Under normal circumstances, there is no need to modify the report triggering -signal. However, if `SIGUSR2` is already used for other purposes, then this -flag helps to change the signal for report generation and preserve the original +signal. However, if `SIGUSR2` is already used for other purposes, then this flag +helps to change the signal for report generation and preserve the original meaning of `SIGUSR2` for the said purposes. -* `--report-on-fatalerror` Enables the report to be triggered on -fatal errors (internal errors within the Node.js runtime, such as out of memory) -that leads to termination of the application. Useful to inspect various -diagnostic data elements such as heap, stack, event loop state, resource -consumption etc. to reason about the fatal error. +- `--report-on-fatalerror` Enables the report to be triggered on fatal errors + (internal errors within the Node.js runtime, such as out of memory) that leads + to termination of the application. Useful to inspect various diagnostic data + elements such as heap, stack, event loop state, resource consumption etc. to + reason about the fatal error. -* `--report-directory` Location at which the report will be -generated. +- `--report-directory` Location at which the report will be generated. 
-* `--report-filename` Name of the file to which the report will be -written. +- `--report-filename` Name of the file to which the report will be written. -* `--report-signal` Sets or resets the signal for report generation -(not supported on Windows). Default signal is `SIGUSR2`. +- `--report-signal` Sets or resets the signal for report generation (not + supported on Windows). Default signal is `SIGUSR2`. A report can also be triggered via an API call from a JavaScript application: @@ -438,46 +435,46 @@ A report can also be triggered via an API call from a JavaScript application: process.report.writeReport(); ``` -This function takes an optional additional argument `filename`, which is -the name of a file into which the report is written. +This function takes an optional additional argument `filename`, which is the +name of a file into which the report is written. ```js -process.report.writeReport('./foo.json'); +process.report.writeReport("./foo.json"); ``` This function takes an optional additional argument `err` - an `Error` object that will be used as the context for the JavaScript stack printed in the report. When using report to handle errors in a callback or an exception handler, this -allows the report to include the location of the original error as well -as where it was handled. +allows the report to include the location of the original error as well as where +it was handled. ```js try { - process.chdir('/non-existent-path'); + process.chdir("/non-existent-path"); } catch (err) { process.report.writeReport(err); } // Any other code ``` -If both filename and error object are passed to `writeReport()` the -error object must be the second parameter. +If both filename and error object are passed to `writeReport()` the error object +must be the second parameter. 
```js try { - process.chdir('/non-existent-path'); + process.chdir("/non-existent-path"); } catch (err) { process.report.writeReport(filename, err); } // Any other code ``` -The content of the diagnostic report can be returned as a JavaScript Object -via an API call from a JavaScript application: +The content of the diagnostic report can be returned as a JavaScript Object via +an API call from a JavaScript application: ```js const report = process.report.getReport(); -console.log(typeof report === 'object'); // true +console.log(typeof report === "object"); // true // Similar to process.report.writeReport() output console.log(JSON.stringify(report, null, 2)); @@ -487,13 +484,13 @@ This function takes an optional additional argument `err` - an `Error` object that will be used as the context for the JavaScript stack printed in the report. ```js -const report = process.report.getReport(new Error('custom error')); -console.log(typeof report === 'object'); // true +const report = process.report.getReport(new Error("custom error")); +console.log(typeof report === "object"); // true ``` -The API versions are useful when inspecting the runtime state from within -the application, in expectation of self-adjusting the resource consumption, -load balancing, monitoring etc. +The API versions are useful when inspecting the runtime state from within the +application, in expectation of self-adjusting the resource consumption, load +balancing, monitoring etc. The content of the report consists of a header section containing the event type, date, time, PID and Node.js version, sections containing JavaScript and @@ -510,16 +507,16 @@ Node.js report completed > ``` -When a report is written, start and end messages are issued to stderr -and the filename of the report is returned to the caller. The default filename -includes the date, time, PID and a sequence number. 
The sequence number helps -in associating the report dump with the runtime state if generated multiple -times for the same Node.js process. +When a report is written, start and end messages are issued to stderr and the +filename of the report is returned to the caller. The default filename includes +the date, time, PID and a sequence number. The sequence number helps in +associating the report dump with the runtime state if generated multiple times +for the same Node.js process. ## Configuration -Additional runtime configuration of report generation is available via -the following properties of `process.report`: +Additional runtime configuration of report generation is available via the +following properties of `process.report`: `reportOnFatalError` triggers diagnostic reporting on fatal errors when `true`. Defaults to `false`. @@ -530,20 +527,19 @@ not supported on Windows. Defaults to `false`. `reportOnUncaughtException` triggers diagnostic reporting on uncaught exception when `true`. Defaults to `false`. -`signal` specifies the POSIX signal identifier that will be used -to intercept external triggers for report generation. Defaults to -`'SIGUSR2'`. +`signal` specifies the POSIX signal identifier that will be used to intercept +external triggers for report generation. Defaults to `'SIGUSR2'`. -`filename` specifies the name of the output file in the file system. -Special meaning is attached to `stdout` and `stderr`. Usage of these -will result in report being written to the associated standard streams. -In cases where standard streams are used, the value in `directory` is ignored. -URLs are not supported. Defaults to a composite filename that contains -timestamp, PID and sequence number. +`filename` specifies the name of the output file in the file system. Special +meaning is attached to `stdout` and `stderr`. Usage of these will result in +report being written to the associated standard streams. 
In cases where standard +streams are used, the value in `directory` is ignored. URLs are not supported. +Defaults to a composite filename that contains timestamp, PID and sequence +number. `directory` specifies the filesystem directory where the report will be written. -URLs are not supported. Defaults to the current working directory of the -Node.js process. +URLs are not supported. Defaults to the current working directory of the Node.js +process. ```js // Trigger report only on uncaught exceptions. @@ -560,11 +556,11 @@ process.report.reportOnUncaughtException = false; process.report.reportOnFatalError = false; process.report.reportOnUncaughtException = false; process.report.reportOnSignal = true; -process.report.signal = 'SIGQUIT'; +process.report.signal = "SIGQUIT"; ``` -Configuration on module initialization is also available via -environment variables: +Configuration on module initialization is also available via environment +variables: ```bash NODE_OPTIONS="--experimental-report --report-uncaught-exception \ @@ -573,7 +569,7 @@ NODE_OPTIONS="--experimental-report --report-uncaught-exception \ --report-directory=/home/nodeuser" ``` -Specific API documentation can be found under -[`process API documentation`][] section. +Specific API documentation can be found under [`process API documentation`][] +section. -[`process API documentation`]: process.html +[`process api documentation`]: process.html diff --git a/doc/api/stream.md b/doc/api/stream.md index 1f0f08a7b86a7f..2cfd02546359ee 100644 --- a/doc/api/stream.md +++ b/doc/api/stream.md @@ -7,9 +7,9 @@ A stream is an abstract interface for working with streaming data in Node.js. The `stream` module provides an API for implementing the stream interface. -There are many stream objects provided by Node.js. For instance, a -[request to an HTTP server][http-incoming-message] and [`process.stdout`][] -are both stream instances. +There are many stream objects provided by Node.js. 
For instance, a [request to +an HTTP server][http-incoming-message] and [`process.stdout`][] are both stream +instances. Streams can be readable, writable, or both. All streams are instances of [`EventEmitter`][]. @@ -17,7 +17,7 @@ Streams can be readable, writable, or both. All streams are instances of To access the `stream` module: ```js -const stream = require('stream'); +const stream = require("stream"); ``` The `stream` module is useful for creating new types of stream instances. It is @@ -33,13 +33,13 @@ second section explains how to create new types of streams. There are four fundamental stream types within Node.js: -* [`Writable`][] - streams to which data can be written (for example, +- [`Writable`][] - streams to which data can be written (for example, [`fs.createWriteStream()`][]). -* [`Readable`][] - streams from which data can be read (for example, +- [`Readable`][] - streams from which data can be read (for example, [`fs.createReadStream()`][]). -* [`Duplex`][] - streams that are both `Readable` and `Writable` (for example, +- [`Duplex`][] - streams that are both `Readable` and `Writable` (for example, [`net.Socket`][]). -* [`Transform`][] - `Duplex` streams that can modify or transform the data as it +- [`Transform`][] - `Duplex` streams that can modify or transform the data as it is written and read (for example, [`zlib.createDeflate()`][]). Additionally, this module includes the utility functions @@ -55,8 +55,8 @@ which serves a special purpose within streams). Such streams are considered to operate in "object mode". Stream instances are switched into object mode using the `objectMode` option -when the stream is created. Attempting to switch an existing stream into -object mode is not safe. +when the stream is created. Attempting to switch an existing stream into object +mode is not safe. 
### Buffering @@ -68,38 +68,38 @@ buffer that can be retrieved using `writable.writableBuffer` or The amount of data potentially buffered depends on the `highWaterMark` option passed into the stream's constructor. For normal streams, the `highWaterMark` -option specifies a [total number of bytes][hwm-gotcha]. For streams operating -in object mode, the `highWaterMark` specifies a total number of objects. +option specifies a [total number of bytes][hwm-gotcha]. For streams operating in +object mode, the `highWaterMark` specifies a total number of objects. Data is buffered in `Readable` streams when the implementation calls -[`stream.push(chunk)`][stream-push]. If the consumer of the Stream does not -call [`stream.read()`][stream-read], the data will sit in the internal -queue until it is consumed. +[`stream.push(chunk)`][stream-push]. If the consumer of the Stream does not call +[`stream.read()`][stream-read], the data will sit in the internal queue until it +is consumed. Once the total size of the internal read buffer reaches the threshold specified by `highWaterMark`, the stream will temporarily stop reading data from the underlying resource until the data currently buffered can be consumed (that is, -the stream will stop calling the internal `readable._read()` method that is -used to fill the read buffer). +the stream will stop calling the internal `readable._read()` method that is used +to fill the read buffer). Data is buffered in `Writable` streams when the [`writable.write(chunk)`][stream-write] method is called repeatedly. While the total size of the internal write buffer is below the threshold set by -`highWaterMark`, calls to `writable.write()` will return `true`. Once -the size of the internal buffer reaches or exceeds the `highWaterMark`, `false` -will be returned. +`highWaterMark`, calls to `writable.write()` will return `true`. Once the size +of the internal buffer reaches or exceeds the `highWaterMark`, `false` will be +returned. 
-A key goal of the `stream` API, particularly the [`stream.pipe()`] method, -is to limit the buffering of data to acceptable levels such that sources and +A key goal of the `stream` API, particularly the [`stream.pipe()`] method, is to +limit the buffering of data to acceptable levels such that sources and destinations of differing speeds will not overwhelm the available memory. Because [`Duplex`][] and [`Transform`][] streams are both `Readable` and -`Writable`, each maintains *two* separate internal buffers used for reading and +`Writable`, each maintains _two_ separate internal buffers used for reading and writing, allowing each side to operate independently of the other while maintaining an appropriate and efficient flow of data. For example, [`net.Socket`][] instances are [`Duplex`][] streams whose `Readable` side allows -consumption of data received *from* the socket and whose `Writable` side allows -writing data *to* the socket. Because data may be written to the socket at a +consumption of data received _from_ the socket and whose `Writable` side allows +writing data _to_ the socket. Because data may be written to the socket at a faster or slower rate than data is received, it is important for each side to operate (and buffer) independently of the other. @@ -112,24 +112,24 @@ manner. The following is an example of using streams in a Node.js application that implements an HTTP server: ```js -const http = require('http'); +const http = require("http"); const server = http.createServer((req, res) => { // `req` is an http.IncomingMessage, which is a Readable Stream. // `res` is an http.ServerResponse, which is a Writable Stream. - let body = ''; + let body = ""; // Get the data as utf8 strings. // If an encoding is not set, Buffer objects will be received. - req.setEncoding('utf8'); + req.setEncoding("utf8"); // Readable streams emit 'data' events once a listener is added. 
- req.on('data', (chunk) => { + req.on("data", chunk => { body += chunk; }); // The 'end' event indicates that the entire body has been received. - req.on('end', () => { + req.on("end", () => { try { const data = JSON.parse(body); // Write back something interesting to the user: @@ -166,28 +166,28 @@ various ways to communicate the current state of the stream. [`Duplex`][] and [`Transform`][] streams are both [`Writable`][] and [`Readable`][]. -Applications that are either writing data to or consuming data from a stream -are not required to implement the stream interfaces directly and will generally -have no reason to call `require('stream')`. +Applications that are either writing data to or consuming data from a stream are +not required to implement the stream interfaces directly and will generally have +no reason to call `require('stream')`. -Developers wishing to implement new types of streams should refer to the -section [API for Stream Implementers][]. +Developers wishing to implement new types of streams should refer to the section +[API for Stream Implementers][]. ### Writable Streams -Writable streams are an abstraction for a *destination* to which data is +Writable streams are an abstraction for a _destination_ to which data is written. Examples of [`Writable`][] streams include: -* [HTTP requests, on the client][] -* [HTTP responses, on the server][] -* [fs write streams][] -* [zlib streams][zlib] -* [crypto streams][crypto] -* [TCP sockets][] -* [child process stdin][] -* [`process.stdout`][], [`process.stderr`][] +- [HTTP requests, on the client][] +- [HTTP responses, on the server][] +- [fs write streams][] +- [zlib streams][zlib] +- [crypto streams][crypto] +- [TCP sockets][] +- [child process stdin][] +- [`process.stdout`][], [`process.stderr`][] Some of these examples are actually [`Duplex`][] streams that implement the [`Writable`][] interface. 
@@ -201,12 +201,13 @@ in the example below: ```js const myStream = getWritableStreamSomehow(); -myStream.write('some data'); -myStream.write('some more data'); -myStream.end('done writing data'); +myStream.write("some data"); +myStream.write("some more data"); +myStream.end("done writing data"); ``` #### Class: stream.Writable + @@ -214,6 +215,7 @@ added: v0.9.4 ##### Event: 'close' + If a call to [`stream.write(chunk)`][stream-write] returns `false`, the -`'drain'` event will be emitted when it is appropriate to resume writing data -to the stream. +`'drain'` event will be emitted when it is appropriate to resume writing data to +the stream. ```js // Write the data to the supplied writable stream one million times. @@ -261,30 +264,31 @@ function writeOneMillionTimes(writer, data, encoding, callback) { if (i > 0) { // Had to stop early! // Write some more once it drains. - writer.once('drain', write); + writer.once("drain", write); } } } ``` ##### Event: 'error' + -* {Error} +- {Error} The `'error'` event is emitted if an error occurred while writing or piping data. The listener callback is passed a single `Error` argument when called. The stream is not closed when the `'error'` event is emitted unless the -[`autoDestroy`][writable-new] option was set to `true` when creating the -stream. +[`autoDestroy`][writable-new] option was set to `true` when creating the stream. -After `'error'`, no further events other than `'close'` *should* be emitted +After `'error'`, no further events other than `'close'` _should_ be emitted (including `'error'` events). 
##### Event: 'finish' + @@ -297,39 +301,41 @@ const writer = getWritableStreamSomehow(); for (let i = 0; i < 100; i++) { writer.write(`hello, #${i}!\n`); } -writer.end('This is the end\n'); -writer.on('finish', () => { - console.log('All writes are now complete.'); +writer.end("This is the end\n"); +writer.on("finish", () => { + console.log("All writes are now complete."); }); ``` ##### Event: 'pipe' + -* `src` {stream.Readable} source stream that is piping to this writable +- `src` {stream.Readable} source stream that is piping to this writable -The `'pipe'` event is emitted when the [`stream.pipe()`][] method is called on -a readable stream, adding this writable to its set of destinations. +The `'pipe'` event is emitted when the [`stream.pipe()`][] method is called on a +readable stream, adding this writable to its set of destinations. ```js const writer = getWritableStreamSomehow(); const reader = getReadableStreamSomehow(); -writer.on('pipe', (src) => { - console.log('Something is piping into the writer.'); +writer.on("pipe", src => { + console.log("Something is piping into the writer."); assert.equal(src, reader); }); reader.pipe(writer); ``` ##### Event: 'unpipe' + -* `src` {stream.Readable} The source stream that - [unpiped][`stream.unpipe()`] this writable +- `src` {stream.Readable} The source stream that [unpiped][`stream.unpipe()`] + this writable The `'unpipe'` event is emitted when the [`stream.unpipe()`][] method is called on a [`Readable`][] stream, removing this [`Writable`][] from its set of @@ -341,8 +347,8 @@ This is also emitted in case this [`Writable`][] stream emits an error when a ```js const writer = getWritableStreamSomehow(); const reader = getReadableStreamSomehow(); -writer.on('unpipe', (src) => { - console.log('Something has stopped piping into the writer.'); +writer.on("unpipe", src => { + console.log("Something has stopped piping into the writer."); assert.equal(src, reader); }); reader.pipe(writer); @@ -350,6 +356,7 @@ 
reader.unpipe(writer); ``` ##### writable.cork() + @@ -367,34 +374,36 @@ buffered writes in a more optimized manner. See also: [`writable.uncork()`][]. ##### writable.destroy([error]) + -* `error` {Error} Optional, an error to emit with `'error'` event. -* Returns: {this} +- `error` {Error} Optional, an error to emit with `'error'` event. +- Returns: {this} Destroy the stream. Optionally emit an `'error'` event, and emit a `'close'` -event unless `emitClose` is set in `false`. After this call, the writable -stream has ended and subsequent calls to `write()` or `end()` will result in -an `ERR_STREAM_DESTROYED` error. -This is a destructive and immediate way to destroy a stream. Previous calls to -`write()` may not have drained, and may trigger an `ERR_STREAM_DESTROYED` error. -Use `end()` instead of destroy if data should flush before close, or wait for -the `'drain'` event before destroying the stream. -Implementors should not override this method, -but instead implement [`writable._destroy()`][writable-_destroy]. +event unless `emitClose` is set in `false`. After this call, the writable stream +has ended and subsequent calls to `write()` or `end()` will result in an +`ERR_STREAM_DESTROYED` error. This is a destructive and immediate way to destroy +a stream. Previous calls to `write()` may not have drained, and may trigger an +`ERR_STREAM_DESTROYED` error. Use `end()` instead of destroy if data should +flush before close, or wait for the `'drain'` event before destroying the +stream. Implementors should not override this method, but instead implement +[`writable._destroy()`][writable-_destroy]. ##### writable.destroyed + -* {boolean} +- {boolean} Is `true` after [`writable.destroy()`][writable-destroy] has been called. ##### writable.end([chunk][, encoding][, callback]) + -* `chunk` {string|Buffer|Uint8Array|any} Optional data to write. For streams - not operating in object mode, `chunk` must be a string, `Buffer` or - `Uint8Array`. 
For object mode streams, `chunk` may be any JavaScript value - other than `null`. -* `encoding` {string} The encoding if `chunk` is a string -* `callback` {Function} Optional callback for when the stream is finished -* Returns: {this} +- `chunk` {string|Buffer|Uint8Array|any} Optional data to write. For streams not + operating in object mode, `chunk` must be a string, `Buffer` or `Uint8Array`. + For object mode streams, `chunk` may be any JavaScript value other than + `null`. +- `encoding` {string} The encoding if `chunk` is a string +- `callback` {Function} Optional callback for when the stream is finished +- Returns: {this} -Calling the `writable.end()` method signals that no more data will be written -to the [`Writable`][]. The optional `chunk` and `encoding` arguments allow one +Calling the `writable.end()` method signals that no more data will be written to +the [`Writable`][]. The optional `chunk` and `encoding` arguments allow one final additional chunk of data to be written immediately before closing the stream. If provided, the optional `callback` function is attached as a listener for the [`'finish'`][] event. @@ -425,14 +434,15 @@ Calling the [`stream.write()`][stream-write] method after calling ```js // Write 'hello, ' and then end with 'world!'. -const fs = require('fs'); -const file = fs.createWriteStream('example.txt'); -file.write('hello, '); -file.end('world!'); +const fs = require("fs"); +const file = fs.createWriteStream("example.txt"); +file.write("hello, "); +file.end("world!"); // Writing more now is not allowed! ``` ##### writable.setDefaultEncoding(encoding) + -* `encoding` {string} The new default encoding -* Returns: {this} +- `encoding` {string} The new default encoding +- Returns: {this} The `writable.setDefaultEncoding()` method sets the default `encoding` for a [`Writable`][] stream. ##### writable.uncork() + @@ -462,8 +473,8 @@ deferred using `process.nextTick()`. 
Doing so allows batching of all ```js stream.cork(); -stream.write('some '); -stream.write('data '); +stream.write("some "); +stream.write("data "); process.nextTick(() => stream.uncork()); ``` @@ -473,9 +484,9 @@ data. ```js stream.cork(); -stream.write('some '); +stream.write("some "); stream.cork(); -stream.write('data '); +stream.write("data "); process.nextTick(() => { stream.uncork(); // The data will not be flushed until uncork() is called a second time. @@ -486,65 +497,71 @@ process.nextTick(() => { See also: [`writable.cork()`][]. ##### writable.writable + -* {boolean} +- {boolean} Is `true` if it is safe to call [`writable.write()`][stream-write]. ##### writable.writableEnded + -* {boolean} +- {boolean} -Is `true` after [`writable.end()`][] has been called. This property -does not indicate whether the data has been flushed, for this use +Is `true` after [`writable.end()`][] has been called. This property does not +indicate whether the data has been flushed, for this use [`writable.writableFinished`][] instead. ##### writable.writableFinished + -* {boolean} +- {boolean} Is set to `true` immediately before the [`'finish'`][] event is emitted. ##### writable.writableHighWaterMark + -* {number} +- {number} -Return the value of `highWaterMark` passed when constructing this -`Writable`. +Return the value of `highWaterMark` passed when constructing this `Writable`. ##### writable.writableLength + -* {number} +- {number} -This property contains the number of bytes (or objects) in the queue -ready to be written. The value provides introspection data regarding -the status of the `highWaterMark`. +This property contains the number of bytes (or objects) in the queue ready to be +written. The value provides introspection data regarding the status of the +`highWaterMark`. ##### writable.writableObjectMode + -* {boolean} +- {boolean} Getter for the property `objectMode` of a given `Writable` stream. 
##### writable.write(chunk[, encoding][, callback]) + -* `chunk` {string|Buffer|Uint8Array|any} Optional data to write. For streams - not operating in object mode, `chunk` must be a string, `Buffer` or - `Uint8Array`. For object mode streams, `chunk` may be any JavaScript value - other than `null`. -* `encoding` {string} The encoding, if `chunk` is a string -* `callback` {Function} Callback for when this chunk of data is flushed -* Returns: {boolean} `false` if the stream wishes for the calling code to - wait for the `'drain'` event to be emitted before continuing to write - additional data; otherwise `true`. +- `chunk` {string|Buffer|Uint8Array|any} Optional data to write. For streams not + operating in object mode, `chunk` must be a string, `Buffer` or `Uint8Array`. + For object mode streams, `chunk` may be any JavaScript value other than + `null`. +- `encoding` {string} The encoding, if `chunk` is a string +- `callback` {Function} Callback for when this chunk of data is flushed +- Returns: {boolean} `false` if the stream wishes for the calling code to wait + for the `'drain'` event to be emitted before continuing to write additional + data; otherwise `true`. The `writable.write()` method writes some data to the stream, and calls the -supplied `callback` once the data has been fully handled. If an error -occurs, the `callback` *may or may not* be called with the error as its -first argument. To reliably detect write errors, add a listener for the -`'error'` event. +supplied `callback` once the data has been fully handled. If an error occurs, +the `callback` _may or may not_ be called with the error as its first argument. +To reliably detect write errors, add a listener for the `'error'` event. The return value is `true` if the internal buffer is less than the `highWaterMark` configured when the stream was created after admitting `chunk`. -If `false` is returned, further attempts to write data to the stream should -stop until the [`'drain'`][] event is emitted. 
+If `false` is returned, further attempts to write data to the stream should stop +until the [`'drain'`][] event is emitted. While a stream is not draining, calls to `write()` will buffer `chunk`, and return false. Once all currently buffered chunks are drained (accepted for -delivery by the operating system), the `'drain'` event will be emitted. -It is recommended that once `write()` returns false, no more chunks be written -until the `'drain'` event is emitted. While calling `write()` on a stream that -is not draining is allowed, Node.js will buffer all written chunks until -maximum memory usage occurs, at which point it will abort unconditionally. -Even before it aborts, high memory usage will cause poor garbage collector -performance and high RSS (which is not typically released back to the system, -even after the memory is no longer required). Since TCP sockets may never -drain if the remote peer does not read the data, writing a socket that is -not draining may lead to a remotely exploitable vulnerability. - -Writing data while the stream is not draining is particularly -problematic for a [`Transform`][], because the `Transform` streams are paused -by default until they are piped or a `'data'` or `'readable'` event handler -is added. +delivery by the operating system), the `'drain'` event will be emitted. It is +recommended that once `write()` returns false, no more chunks be written until +the `'drain'` event is emitted. While calling `write()` on a stream that is not +draining is allowed, Node.js will buffer all written chunks until maximum memory +usage occurs, at which point it will abort unconditionally. Even before it +aborts, high memory usage will cause poor garbage collector performance and high +RSS (which is not typically released back to the system, even after the memory +is no longer required). 
Since TCP sockets may never drain if the remote peer +does not read the data, writing a socket that is not draining may lead to a +remotely exploitable vulnerability. + +Writing data while the stream is not draining is particularly problematic for a +[`Transform`][], because the `Transform` streams are paused by default until +they are piped or a `'data'` or `'readable'` event handler is added. If the data to be written can be generated or fetched on demand, it is recommended to encapsulate the logic into a [`Readable`][] and use -[`stream.pipe()`][]. However, if calling `write()` is preferred, it is -possible to respect backpressure and avoid memory issues using the -[`'drain'`][] event: +[`stream.pipe()`][]. However, if calling `write()` is preferred, it is possible +to respect backpressure and avoid memory issues using the [`'drain'`][] event: ```js function write(data, cb) { if (!stream.write(data)) { - stream.once('drain', cb); + stream.once("drain", cb); } else { process.nextTick(cb); } } // Wait for cb to be called before doing any other write. -write('hello', () => { - console.log('Write completed, do more writes now.'); +write("hello", () => { + console.log("Write completed, do more writes now."); }); ``` @@ -621,73 +635,70 @@ A `Writable` stream in object mode will always ignore the `encoding` argument. ### Readable Streams -Readable streams are an abstraction for a *source* from which data is -consumed. +Readable streams are an abstraction for a _source_ from which data is consumed. 
Examples of `Readable` streams include: -* [HTTP responses, on the client][http-incoming-message] -* [HTTP requests, on the server][http-incoming-message] -* [fs read streams][] -* [zlib streams][zlib] -* [crypto streams][crypto] -* [TCP sockets][] -* [child process stdout and stderr][] -* [`process.stdin`][] +- [HTTP responses, on the client][http-incoming-message] +- [HTTP requests, on the server][http-incoming-message] +- [fs read streams][] +- [zlib streams][zlib] +- [crypto streams][crypto] +- [TCP sockets][] +- [child process stdout and stderr][] +- [`process.stdin`][] All [`Readable`][] streams implement the interface defined by the `stream.Readable` class. #### Two Reading Modes -`Readable` streams effectively operate in one of two modes: flowing and -paused. These modes are separate from [object mode][object-mode]. -A [`Readable`][] stream can be in object mode or not, regardless of whether -it is in flowing mode or paused mode. +`Readable` streams effectively operate in one of two modes: flowing and paused. +These modes are separate from [object mode][object-mode]. A [`Readable`][] +stream can be in object mode or not, regardless of whether it is in flowing mode +or paused mode. -* In flowing mode, data is read from the underlying system automatically -and provided to an application as quickly as possible using events via the -[`EventEmitter`][] interface. +- In flowing mode, data is read from the underlying system automatically and + provided to an application as quickly as possible using events via the + [`EventEmitter`][] interface. -* In paused mode, the [`stream.read()`][stream-read] method must be called -explicitly to read chunks of data from the stream. +- In paused mode, the [`stream.read()`][stream-read] method must be called + explicitly to read chunks of data from the stream. All [`Readable`][] streams begin in paused mode but can be switched to flowing mode in one of the following ways: -* Adding a [`'data'`][] event handler. 
-* Calling the [`stream.resume()`][stream-resume] method. -* Calling the [`stream.pipe()`][] method to send the data to a [`Writable`][]. +- Adding a [`'data'`][] event handler. +- Calling the [`stream.resume()`][stream-resume] method. +- Calling the [`stream.pipe()`][] method to send the data to a [`Writable`][]. The `Readable` can switch back to paused mode using one of the following: -* If there are no pipe destinations, by calling the +- If there are no pipe destinations, by calling the [`stream.pause()`][stream-pause] method. -* If there are pipe destinations, by removing all pipe destinations. - Multiple pipe destinations may be removed by calling the - [`stream.unpipe()`][] method. +- If there are pipe destinations, by removing all pipe destinations. Multiple + pipe destinations may be removed by calling the [`stream.unpipe()`][] method. The important concept to remember is that a `Readable` will not generate data -until a mechanism for either consuming or ignoring that data is provided. If -the consuming mechanism is disabled or taken away, the `Readable` will *attempt* -to stop generating the data. +until a mechanism for either consuming or ignoring that data is provided. If the +consuming mechanism is disabled or taken away, the `Readable` will _attempt_ to +stop generating the data. For backward compatibility reasons, removing [`'data'`][] event handlers will **not** automatically pause the stream. Also, if there are piped destinations, -then calling [`stream.pause()`][stream-pause] will not guarantee that the -stream will *remain* paused once those destinations drain and ask for more data. +then calling [`stream.pause()`][stream-pause] will not guarantee that the stream +will _remain_ paused once those destinations drain and ask for more data. If a [`Readable`][] is switched into flowing mode and there are no consumers available to handle the data, that data will be lost. 
This can occur, for instance, when the `readable.resume()` method is called without a listener -attached to the `'data'` event, or when a `'data'` event handler is removed -from the stream. +attached to the `'data'` event, or when a `'data'` event handler is removed from +the stream. -Adding a [`'readable'`][] event handler automatically make the stream to -stop flowing, and the data to be consumed via -[`readable.read()`][stream-read]. If the [`'readable'`] event handler is -removed, then the stream will start flowing again if there is a -[`'data'`][] event handler. +Adding a [`'readable'`][] event handler automatically make the stream to stop +flowing, and the data to be consumed via [`readable.read()`][stream-read]. If +the [`'readable'`] event handler is removed, then the stream will start flowing +again if there is a [`'data'`][] event handler. #### Three States @@ -698,25 +709,25 @@ within the `Readable` stream implementation. Specifically, at any given point in time, every `Readable` is in one of three possible states: -* `readable.readableFlowing === null` -* `readable.readableFlowing === false` -* `readable.readableFlowing === true` +- `readable.readableFlowing === null` +- `readable.readableFlowing === false` +- `readable.readableFlowing === true` When `readable.readableFlowing` is `null`, no mechanism for consuming the -stream's data is provided. Therefore, the stream will not generate data. -While in this state, attaching a listener for the `'data'` event, calling the +stream's data is provided. Therefore, the stream will not generate data. While +in this state, attaching a listener for the `'data'` event, calling the `readable.pipe()` method, or calling the `readable.resume()` method will switch `readable.readableFlowing` to `true`, causing the `Readable` to begin actively emitting events as data is generated. 
-Calling `readable.pause()`, `readable.unpipe()`, or receiving backpressure -will cause the `readable.readableFlowing` to be set as `false`, -temporarily halting the flowing of events but *not* halting the generation of -data. While in this state, attaching a listener for the `'data'` event -will not switch `readable.readableFlowing` to `true`. +Calling `readable.pause()`, `readable.unpipe()`, or receiving backpressure will +cause the `readable.readableFlowing` to be set as `false`, temporarily halting +the flowing of events but _not_ halting the generation of data. While in this +state, attaching a listener for the `'data'` event will not switch +`readable.readableFlowing` to `true`. ```js -const { PassThrough, Writable } = require('stream'); +const { PassThrough, Writable } = require("stream"); const pass = new PassThrough(); const writable = new Writable(); @@ -724,30 +735,33 @@ pass.pipe(writable); pass.unpipe(writable); // readableFlowing is now false. -pass.on('data', (chunk) => { console.log(chunk.toString()); }); -pass.write('ok'); // Will not emit 'data'. -pass.resume(); // Must be called to make stream emit 'data'. +pass.on("data", chunk => { + console.log(chunk.toString()); +}); +pass.write("ok"); // Will not emit 'data'. +pass.resume(); // Must be called to make stream emit 'data'. ``` -While `readable.readableFlowing` is `false`, data may be accumulating -within the stream's internal buffer. +While `readable.readableFlowing` is `false`, data may be accumulating within the +stream's internal buffer. #### Choose One API Style The `Readable` stream API evolved across multiple Node.js versions and provides multiple methods of consuming stream data. In general, developers should choose -*one* of the methods of consuming data and *should never* use multiple methods -to consume data from a single stream. Specifically, using a combination -of `on('data')`, `on('readable')`, `pipe()`, or async iterators could -lead to unintuitive behavior. 
+_one_ of the methods of consuming data and _should never_ use multiple methods +to consume data from a single stream. Specifically, using a combination of +`on('data')`, `on('readable')`, `pipe()`, or async iterators could lead to +unintuitive behavior. Use of the `readable.pipe()` method is recommended for most users as it has been implemented to provide the easiest way of consuming stream data. Developers that require more fine-grained control over the transfer and generation of data can -use the [`EventEmitter`][] and `readable.on('readable')`/`readable.read()` -or the `readable.pause()`/`readable.resume()` APIs. +use the [`EventEmitter`][] and `readable.on('readable')`/`readable.read()` or +the `readable.pause()`/`readable.resume()` APIs. #### Class: stream.Readable + @@ -755,6 +769,7 @@ added: v0.9.4 ##### Event: 'close' + -* `chunk` {Buffer|string|any} The chunk of data. For streams that are not - operating in object mode, the chunk will be either a string or `Buffer`. - For streams that are in object mode, the chunk can be any JavaScript value - other than `null`. +- `chunk` {Buffer|string|any} The chunk of data. For streams that are not + operating in object mode, the chunk will be either a string or `Buffer`. For + streams that are in object mode, the chunk can be any JavaScript value other + than `null`. The `'data'` event is emitted whenever the stream is relinquishing ownership of -a chunk of data to a consumer. This may occur whenever the stream is switched -in flowing mode by calling `readable.pipe()`, `readable.resume()`, or by -attaching a listener callback to the `'data'` event. The `'data'` event will -also be emitted whenever the `readable.read()` method is called and a chunk of -data is available to be returned. +a chunk of data to a consumer. This may occur whenever the stream is switched in +flowing mode by calling `readable.pipe()`, `readable.resume()`, or by attaching +a listener callback to the `'data'` event. 
The `'data'` event will also be +emitted whenever the `readable.read()` method is called and a chunk of data is +available to be returned. Attaching a `'data'` event listener to a stream that has not been explicitly paused will switch the stream into flowing mode. Data will then be passed as soon as it is available. The listener callback will be passed the chunk of data as a string if a default -encoding has been specified for the stream using the -`readable.setEncoding()` method; otherwise the data will be passed as a -`Buffer`. +encoding has been specified for the stream using the `readable.setEncoding()` +method; otherwise the data will be passed as a `Buffer`. ```js const readable = getReadableStreamSomehow(); -readable.on('data', (chunk) => { +readable.on("data", chunk => { console.log(`Received ${chunk.length} bytes of data.`); }); ``` ##### Event: 'end' + -The `'end'` event is emitted when there is no more data to be consumed from -the stream. +The `'end'` event is emitted when there is no more data to be consumed from the +stream. The `'end'` event **will not be emitted** unless the data is completely -consumed. This can be accomplished by switching the stream into flowing mode, -or by calling [`stream.read()`][stream-read] repeatedly until all data has been +consumed. This can be accomplished by switching the stream into flowing mode, or +by calling [`stream.read()`][stream-read] repeatedly until all data has been consumed. ```js const readable = getReadableStreamSomehow(); -readable.on('data', (chunk) => { +readable.on("data", chunk => { console.log(`Received ${chunk.length} bytes of data.`); }); -readable.on('end', () => { - console.log('There will be no more data.'); +readable.on("end", () => { + console.log("There will be no more data."); }); ``` ##### Event: 'error' + -* {Error} +- {Error} The `'error'` event may be emitted by a `Readable` implementation at any time. 
Typically, this may occur if the underlying stream is unable to generate data @@ -842,6 +859,7 @@ to push an invalid chunk of data. The listener callback will be passed a single `Error` object. ##### Event: 'pause' + @@ -850,6 +868,7 @@ The `'pause'` event is emitted when [`stream.pause()`][stream-pause] is called and `readableFlowing` is not `false`. ##### Event: 'readable' + @@ -927,40 +946,42 @@ The `'resume'` event is emitted when [`stream.resume()`][stream-resume] is called and `readableFlowing` is not `true`. ##### readable.destroy([error]) + -* `error` {Error} Error which will be passed as payload in `'error'` event -* Returns: {this} +- `error` {Error} Error which will be passed as payload in `'error'` event +- Returns: {this} Destroy the stream. Optionally emit an `'error'` event, and emit a `'close'` -event unless `emitClose` is set in `false`. After this call, the readable -stream will release any internal resources and subsequent calls to `push()` -will be ignored. -Implementors should not override this method, but instead implement +event unless `emitClose` is set in `false`. After this call, the readable stream +will release any internal resources and subsequent calls to `push()` will be +ignored. Implementors should not override this method, but instead implement [`readable._destroy()`][readable-_destroy]. ##### readable.destroyed + -* {boolean} +- {boolean} Is `true` after [`readable.destroy()`][readable-destroy] has been called. ##### readable.isPaused() + -* Returns: {boolean} +- Returns: {boolean} The `readable.isPaused()` method returns the current operating state of the `Readable`. This is used primarily by the mechanism that underlies the -`readable.pipe()` method. In most typical cases, there will be no reason to -use this method directly. +`readable.pipe()` method. In most typical cases, there will be no reason to use +this method directly. 
```js const readable = new stream.Readable(); @@ -973,11 +994,12 @@ readable.isPaused(); // === false ``` ##### readable.pause() + -* Returns: {this} +- Returns: {this} The `readable.pause()` method will cause a stream in flowing mode to stop emitting [`'data'`][] events, switching out of flowing mode. Any data that @@ -985,44 +1007,45 @@ becomes available will remain in the internal buffer. ```js const readable = getReadableStreamSomehow(); -readable.on('data', (chunk) => { +readable.on("data", chunk => { console.log(`Received ${chunk.length} bytes of data.`); readable.pause(); - console.log('There will be no additional data for 1 second.'); + console.log("There will be no additional data for 1 second."); setTimeout(() => { - console.log('Now data will start flowing again.'); + console.log("Now data will start flowing again."); readable.resume(); }, 1000); }); ``` -The `readable.pause()` method has no effect if there is a `'readable'` -event listener. +The `readable.pause()` method has no effect if there is a `'readable'` event +listener. ##### readable.pipe(destination[, options]) + -* `destination` {stream.Writable} The destination for writing data -* `options` {Object} Pipe options - * `end` {boolean} End the writer when the reader ends. **Default:** `true`. -* Returns: {stream.Writable} The *destination*, allowing for a chain of pipes if +- `destination` {stream.Writable} The destination for writing data +- `options` {Object} Pipe options + - `end` {boolean} End the writer when the reader ends. **Default:** `true`. +- Returns: {stream.Writable} The _destination_, allowing for a chain of pipes if it is a [`Duplex`][] or a [`Transform`][] stream The `readable.pipe()` method attaches a [`Writable`][] stream to the `readable`, -causing it to switch automatically into flowing mode and push all of its data -to the attached [`Writable`][]. 
The flow of data will be automatically managed -so that the destination `Writable` stream is not overwhelmed by a faster -`Readable` stream. +causing it to switch automatically into flowing mode and push all of its data to +the attached [`Writable`][]. The flow of data will be automatically managed so +that the destination `Writable` stream is not overwhelmed by a faster `Readable` +stream. The following example pipes all of the data from the `readable` into a file named `file.txt`: ```js -const fs = require('fs'); +const fs = require("fs"); const readable = getReadableStreamSomehow(); -const writable = fs.createWriteStream('file.txt'); +const writable = fs.createWriteStream("file.txt"); // All the data from readable goes into 'file.txt'. readable.pipe(writable); ``` @@ -1030,14 +1053,14 @@ readable.pipe(writable); It is possible to attach multiple `Writable` streams to a single `Readable` stream. -The `readable.pipe()` method returns a reference to the *destination* stream +The `readable.pipe()` method returns a reference to the _destination_ stream making it possible to set up chains of piped streams: ```js -const fs = require('fs'); -const r = fs.createReadStream('file.txt'); +const fs = require("fs"); +const r = fs.createReadStream("file.txt"); const z = zlib.createGzip(); -const w = fs.createWriteStream('file.txt.gz'); +const w = fs.createWriteStream("file.txt.gz"); r.pipe(z).pipe(w); ``` @@ -1047,38 +1070,42 @@ destination is no longer writable. To disable this default behavior, the `end` option can be passed as `false`, causing the destination stream to remain open: ```js -reader.pipe(writer, { end: false }); -reader.on('end', () => { - writer.end('Goodbye\n'); +reader.pipe( + writer, + { end: false } +); +reader.on("end", () => { + writer.end("Goodbye\n"); }); ``` One important caveat is that if the `Readable` stream emits an error during -processing, the `Writable` destination *is not closed* automatically. 
If an -error occurs, it will be necessary to *manually* close each stream in order -to prevent memory leaks. +processing, the `Writable` destination _is not closed_ automatically. If an +error occurs, it will be necessary to _manually_ close each stream in order to +prevent memory leaks. The [`process.stderr`][] and [`process.stdout`][] `Writable` streams are never closed until the Node.js process exits, regardless of the specified options. ##### readable.read([size]) + -* `size` {number} Optional argument to specify how much data to read. -* Returns: {string|Buffer|null|any} +- `size` {number} Optional argument to specify how much data to read. +- Returns: {string|Buffer|null|any} The `readable.read()` method pulls some data out of the internal buffer and -returns it. If no data available to be read, `null` is returned. By default, -the data will be returned as a `Buffer` object unless an encoding has been -specified using the `readable.setEncoding()` method or the stream is operating -in object mode. +returns it. If no data available to be read, `null` is returned. By default, the +data will be returned as a `Buffer` object unless an encoding has been specified +using the `readable.setEncoding()` method or the stream is operating in object +mode. The optional `size` argument specifies a specific number of bytes to read. If -`size` bytes are not available to be read, `null` will be returned *unless* -the stream has ended, in which case all of the data remaining in the internal -buffer will be returned. +`size` bytes are not available to be read, `null` will be returned _unless_ the +stream has ended, in which case all of the data remaining in the internal buffer +will be returned. If the `size` argument is not specified, all of the data contained in the internal buffer will be returned. @@ -1089,7 +1116,7 @@ automatically until the internal buffer is fully drained. 
```js const readable = getReadableStreamSomehow(); -readable.on('readable', () => { +readable.on("readable", () => { let chunk; while (null !== (chunk = readable.read())) { console.log(`Received ${chunk.length} bytes of data.`); @@ -1097,13 +1124,12 @@ readable.on('readable', () => { }); ``` -The `while` loop is necessary when processing data with -`readable.read()`. Only after `readable.read()` returns `null`, -[`'readable'`][] will be emitted. +The `while` loop is necessary when processing data with `readable.read()`. Only +after `readable.read()` returns `null`, [`'readable'`][] will be emitted. -A `Readable` stream in object mode will always return a single item from -a call to [`readable.read(size)`][stream-read], regardless of the value of the -`size` argument. +A `Readable` stream in object mode will always return a single item from a call +to [`readable.read(size)`][stream-read], regardless of the value of the `size` +argument. If the `readable.read()` method returns a chunk of data, a `'data'` event will also be emitted. @@ -1112,74 +1138,81 @@ Calling [`stream.read([size])`][stream-read] after the [`'end'`][] event has been emitted will return `null`. No runtime error will be raised. ##### readable.readable + -* {boolean} +- {boolean} Is `true` if it is safe to call [`readable.read()`][stream-read]. ##### readable.readableEncoding + -* {null|string} +- {null|string} Getter for the property `encoding` of a given `Readable` stream. The `encoding` property can be set using the [`readable.setEncoding()`][] method. ##### readable.readableEnded + -* {boolean} +- {boolean} Becomes `true` when [`'end'`][] event is emitted. ##### readable.readableFlowing + -* {boolean} +- {boolean} -This property reflects the current state of a `Readable` stream as described -in the [Stream Three States][] section. +This property reflects the current state of a `Readable` stream as described in +the [Stream Three States][] section. 
##### readable.readableHighWaterMark + -* {number} +- {number} -Returns the value of `highWaterMark` passed when constructing this -`Readable`. +Returns the value of `highWaterMark` passed when constructing this `Readable`. ##### readable.readableLength + -* {number} +- {number} -This property contains the number of bytes (or objects) in the queue -ready to be read. The value provides introspection data regarding -the status of the `highWaterMark`. +This property contains the number of bytes (or objects) in the queue ready to be +read. The value provides introspection data regarding the status of the +`highWaterMark`. ##### readable.readableObjectMode + -* {boolean} +- {boolean} Getter for the property `objectMode` of a given `Readable` stream. ##### readable.resume() + -* Returns: {this} +- Returns: {this} The `readable.resume()` method causes an explicitly paused `Readable` stream to resume emitting [`'data'`][] events, switching the stream into flowing mode. @@ -1200,32 +1233,32 @@ stream without actually processing any of that data: ```js getReadableStreamSomehow() .resume() - .on('end', () => { - console.log('Reached the end, but did not read anything.'); + .on("end", () => { + console.log("Reached the end, but did not read anything."); }); ``` -The `readable.resume()` method has no effect if there is a `'readable'` -event listener. +The `readable.resume()` method has no effect if there is a `'readable'` event +listener. ##### readable.setEncoding(encoding) + -* `encoding` {string} The encoding to use. -* Returns: {this} +- `encoding` {string} The encoding to use. +- Returns: {this} -The `readable.setEncoding()` method sets the character encoding for -data read from the `Readable` stream. +The `readable.setEncoding()` method sets the character encoding for data read +from the `Readable` stream. -By default, no encoding is assigned and stream data will be returned as -`Buffer` objects. 
Setting an encoding causes the stream data -to be returned as strings of the specified encoding rather than as `Buffer` -objects. For instance, calling `readable.setEncoding('utf8')` will cause the -output data to be interpreted as UTF-8 data, and passed as strings. Calling -`readable.setEncoding('hex')` will cause the data to be encoded in hexadecimal -string format. +By default, no encoding is assigned and stream data will be returned as `Buffer` +objects. Setting an encoding causes the stream data to be returned as strings of +the specified encoding rather than as `Buffer` objects. For instance, calling +`readable.setEncoding('utf8')` will cause the output data to be interpreted as +UTF-8 data, and passed as strings. Calling `readable.setEncoding('hex')` will +cause the data to be encoded in hexadecimal string format. The `Readable` stream will properly handle multi-byte characters delivered through the stream that would otherwise become improperly decoded if simply @@ -1233,45 +1266,47 @@ pulled from the stream as `Buffer` objects. ```js const readable = getReadableStreamSomehow(); -readable.setEncoding('utf8'); -readable.on('data', (chunk) => { - assert.equal(typeof chunk, 'string'); - console.log('Got %d characters of string data:', chunk.length); +readable.setEncoding("utf8"); +readable.on("data", chunk => { + assert.equal(typeof chunk, "string"); + console.log("Got %d characters of string data:", chunk.length); }); ``` ##### readable.unpipe([destination]) + -* `destination` {stream.Writable} Optional specific stream to unpipe -* Returns: {this} +- `destination` {stream.Writable} Optional specific stream to unpipe +- Returns: {this} The `readable.unpipe()` method detaches a `Writable` stream previously attached using the [`stream.pipe()`][] method. -If the `destination` is not specified, then *all* pipes are detached. +If the `destination` is not specified, then _all_ pipes are detached. 
-If the `destination` is specified, but no pipe is set up for it, then -the method does nothing. +If the `destination` is specified, but no pipe is set up for it, then the method +does nothing. ```js -const fs = require('fs'); +const fs = require("fs"); const readable = getReadableStreamSomehow(); -const writable = fs.createWriteStream('file.txt'); +const writable = fs.createWriteStream("file.txt"); // All the data from readable goes into 'file.txt', // but only for the first second. readable.pipe(writable); setTimeout(() => { - console.log('Stop writing to file.txt.'); + console.log("Stop writing to file.txt."); readable.unpipe(writable); - console.log('Manually close the file stream.'); + console.log("Manually close the file stream."); writable.end(); }, 1000); ``` ##### readable.unshift(chunk[, encoding]) + -* `chunk` {Buffer|Uint8Array|string|null|any} Chunk of data to unshift onto the +- `chunk` {Buffer|Uint8Array|string|null|any} Chunk of data to unshift onto the read queue. For streams not operating in object mode, `chunk` must be a - string, `Buffer`, `Uint8Array` or `null`. For object mode streams, `chunk` - may be any JavaScript value. -* `encoding` {string} Encoding of string chunks. Must be a valid - `Buffer` encoding, such as `'utf8'` or `'ascii'`. + string, `Buffer`, `Uint8Array` or `null`. For object mode streams, `chunk` may + be any JavaScript value. +- `encoding` {string} Encoding of string chunks. Must be a valid `Buffer` + encoding, such as `'utf8'` or `'ascii'`. Passing `chunk` as `null` signals the end of the stream (EOF), after which no more data can be written. @@ -1298,20 +1333,20 @@ pulled out of the source, so that the data can be passed on to some other party. The `stream.unshift(chunk)` method cannot be called after the [`'end'`][] event has been emitted or a runtime error will be thrown. -Developers using `stream.unshift()` often should consider switching to -use of a [`Transform`][] stream instead. 
See the [API for Stream Implementers][] -section for more information. +Developers using `stream.unshift()` often should consider switching to use of a +[`Transform`][] stream instead. See the [API for Stream Implementers][] section +for more information. ```js // Pull off a header delimited by \n\n. // Use unshift() if we get too much. // Call the callback with (error, header, stream). -const { StringDecoder } = require('string_decoder'); +const { StringDecoder } = require("string_decoder"); function parseHeader(stream, callback) { - stream.on('error', callback); - stream.on('readable', onReadable); - const decoder = new StringDecoder('utf8'); - let header = ''; + stream.on("error", callback); + stream.on("readable", onReadable); + const decoder = new StringDecoder("utf8"); + let header = ""; function onReadable() { let chunk; while (null !== (chunk = stream.read())) { @@ -1320,13 +1355,12 @@ function parseHeader(stream, callback) { // Found the header boundary. const split = str.split(/\n\n/); header += split.shift(); - const remaining = split.join('\n\n'); - const buf = Buffer.from(remaining, 'utf8'); - stream.removeListener('error', callback); + const remaining = split.join("\n\n"); + const buf = Buffer.from(remaining, "utf8"); + stream.removeListener("error", callback); // Remove the 'readable' listener before unshifting. - stream.removeListener('readable', onReadable); - if (buf.length) - stream.unshift(buf); + stream.removeListener("readable", onReadable); + if (buf.length) stream.unshift(buf); // Now the body of the message can be read from the stream. callback(null, header, stream); } else { @@ -1338,22 +1372,23 @@ function parseHeader(stream, callback) { } ``` -Unlike [`stream.push(chunk)`][stream-push], `stream.unshift(chunk)` will not -end the reading process by resetting the internal reading state of the stream. -This can cause unexpected results if `readable.unshift()` is called during a -read (i.e. 
from within a [`stream._read()`][stream-_read] implementation on a -custom stream). Following the call to `readable.unshift()` with an immediate +Unlike [`stream.push(chunk)`][stream-push], `stream.unshift(chunk)` will not end +the reading process by resetting the internal reading state of the stream. This +can cause unexpected results if `readable.unshift()` is called during a read +(i.e. from within a [`stream._read()`][stream-_read] implementation on a custom +stream). Following the call to `readable.unshift()` with an immediate [`stream.push('')`][stream-push] will reset the reading state appropriately, however it is best to simply avoid calling `readable.unshift()` while in the process of performing a read. ##### readable.wrap(stream) + -* `stream` {Stream} An "old style" readable stream -* Returns: {this} +- `stream` {Stream} An "old style" readable stream +- Returns: {this} Prior to Node.js 0.10, streams did not implement the entire `stream` module API as it is currently defined. (See [Compatibility][] for more information.) @@ -1368,17 +1403,18 @@ provided as a convenience for interacting with older Node.js applications and libraries. ```js -const { OldReader } = require('./old-api-module.js'); -const { Readable } = require('stream'); +const { OldReader } = require("./old-api-module.js"); +const { Readable } = require("stream"); const oreader = new OldReader(); const myReader = new Readable().wrap(oreader); -myReader.on('readable', () => { +myReader.on("readable", () => { myReader.read(); // etc. }); ``` ##### readable\[Symbol.asyncIterator\]() + -* Returns: {AsyncIterator} to fully consume the stream. +- Returns: {AsyncIterator} to fully consume the stream. 
```js -const fs = require('fs'); +const fs = require("fs"); async function print(readable) { - readable.setEncoding('utf8'); - let data = ''; + readable.setEncoding("utf8"); + let data = ""; for await (const chunk of readable) { data += chunk; } console.log(data); } -print(fs.createReadStream('file')).catch(console.error); +print(fs.createReadStream("file")).catch(console.error); ``` If the loop terminates with a `break` or a `throw`, the stream will be @@ -1414,6 +1450,7 @@ has less then 64kb of data because no `highWaterMark` option is provided to ### Duplex and Transform Streams #### Class: stream.Duplex + @@ -1447,54 +1485,56 @@ implement both the [`Readable`][] and [`Writable`][] interfaces. Examples of `Transform` streams include: -* [zlib streams][zlib] -* [crypto streams][crypto] +- [zlib streams][zlib] +- [crypto streams][crypto] ##### transform.destroy([error]) + -* `error` {Error} +- `error` {Error} Destroy the stream, and optionally emit an `'error'` event. After this call, the -transform stream would release any internal resources. -Implementors should not override this method, but instead implement -[`readable._destroy()`][readable-_destroy]. -The default implementation of `_destroy()` for `Transform` also emit `'close'` -unless `emitClose` is set in false. +transform stream would release any internal resources. Implementors should not +override this method, but instead implement +[`readable._destroy()`][readable-_destroy]. The default implementation of +`_destroy()` for `Transform` also emit `'close'` unless `emitClose` is set in +false. ### stream.finished(stream[, options], callback) + -* `stream` {Stream} A readable and/or writable stream. -* `options` {Object} - * `error` {boolean} If set to `false`, then a call to `emit('error', err)` is +- `stream` {Stream} A readable and/or writable stream. +- `options` {Object} + - `error` {boolean} If set to `false`, then a call to `emit('error', err)` is not treated as finished. **Default**: `true`. 
- * `readable` {boolean} When set to `false`, the callback will be called when - the stream ends even though the stream might still be readable. - **Default**: `true`. - * `writable` {boolean} When set to `false`, the callback will be called when - the stream ends even though the stream might still be writable. - **Default**: `true`. -* `callback` {Function} A callback function that takes an optional error + - `readable` {boolean} When set to `false`, the callback will be called when + the stream ends even though the stream might still be readable. **Default**: + `true`. + - `writable` {boolean} When set to `false`, the callback will be called when + the stream ends even though the stream might still be writable. **Default**: + `true`. +- `callback` {Function} A callback function that takes an optional error argument. -A function to get notified when a stream is no longer readable, writable -or has experienced an error or a premature close event. +A function to get notified when a stream is no longer readable, writable or has +experienced an error or a premature close event. ```js -const { finished } = require('stream'); +const { finished } = require("stream"); -const rs = fs.createReadStream('archive.tar'); +const rs = fs.createReadStream("archive.tar"); -finished(rs, (err) => { +finished(rs, err => { if (err) { - console.error('Stream failed.', err); + console.error("Stream failed.", err); } else { - console.log('Stream is done reading.'); + console.log("Stream is done reading."); } }); @@ -1502,19 +1542,19 @@ rs.resume(); // Drain the stream. ``` Especially useful in error handling scenarios where a stream is destroyed -prematurely (like an aborted HTTP request), and will not emit `'end'` -or `'finish'`. +prematurely (like an aborted HTTP request), and will not emit `'end'` or +`'finish'`. 
The `finished` API is promisify-able as well; ```js const finished = util.promisify(stream.finished); -const rs = fs.createReadStream('archive.tar'); +const rs = fs.createReadStream("archive.tar"); async function run() { await finished(rs); - console.log('Stream is done reading.'); + console.log("Stream is done reading."); } run().catch(console.error); @@ -1522,21 +1562,22 @@ rs.resume(); // Drain the stream. ``` ### stream.pipeline(...streams, callback) + -* `...streams` {Stream} Two or more streams to pipe between. -* `callback` {Function} Called when the pipeline is fully done. - * `err` {Error} +- `...streams` {Stream} Two or more streams to pipe between. +- `callback` {Function} Called when the pipeline is fully done. + - `err` {Error} A module method to pipe between streams forwarding errors and properly cleaning up and provide a callback when the pipeline is complete. ```js -const { pipeline } = require('stream'); -const fs = require('fs'); -const zlib = require('zlib'); +const { pipeline } = require("stream"); +const fs = require("fs"); +const zlib = require("zlib"); // Use the pipeline API to easily pipe a series of streams // together and get notified when the pipeline is fully done. 
@@ -1544,14 +1585,14 @@ const zlib = require('zlib'); // A pipeline to gzip a potentially huge tar file efficiently: pipeline( - fs.createReadStream('archive.tar'), + fs.createReadStream("archive.tar"), zlib.createGzip(), - fs.createWriteStream('archive.tar.gz'), - (err) => { + fs.createWriteStream("archive.tar.gz"), + err => { if (err) { - console.error('Pipeline failed.', err); + console.error("Pipeline failed.", err); } else { - console.log('Pipeline succeeded.'); + console.log("Pipeline succeeded."); } } ); @@ -1564,41 +1605,42 @@ const pipeline = util.promisify(stream.pipeline); async function run() { await pipeline( - fs.createReadStream('archive.tar'), + fs.createReadStream("archive.tar"), zlib.createGzip(), - fs.createWriteStream('archive.tar.gz') + fs.createWriteStream("archive.tar.gz") ); - console.log('Pipeline succeeded.'); + console.log("Pipeline succeeded."); } run().catch(console.error); ``` ### stream.Readable.from(iterable, [options]) + -* `iterable` {Iterable} Object implementing the `Symbol.asyncIterator` or +- `iterable` {Iterable} Object implementing the `Symbol.asyncIterator` or `Symbol.iterator` iterable protocol. -* `options` {Object} Options provided to `new stream.Readable([options])`. - By default, `Readable.from()` will set `options.objectMode` to `true`, unless +- `options` {Object} Options provided to `new stream.Readable([options])`. By + default, `Readable.from()` will set `options.objectMode` to `true`, unless this is explicitly opted out by setting `options.objectMode` to `false`. -* Returns: {stream.Readable} +- Returns: {stream.Readable} A utility method for creating Readable Streams out of iterators. 
```js -const { Readable } = require('stream'); +const { Readable } = require("stream"); -async function * generate() { - yield 'hello'; - yield 'streams'; +async function* generate() { + yield "hello"; + yield "streams"; } const readable = Readable.from(generate()); -readable.on('data', (chunk) => { +readable.on("data", chunk => { console.log(chunk); }); ``` @@ -1616,8 +1658,9 @@ of the four basic stream classes (`stream.Writable`, `stream.Readable`, parent class constructor: + ```js -const { Writable } = require('stream'); +const { Writable } = require("stream"); class MyWritable extends Writable { constructor(options) { @@ -1630,19 +1673,20 @@ class MyWritable extends Writable { The new stream class must then implement one or more specific methods, depending on the type of stream being created, as detailed in the chart below: -| Use-case | Class | Method(s) to implement | -| -------- | ----- | ---------------------- | -| Reading only | [`Readable`] | [_read()][stream-_read] | -| Writing only | [`Writable`] | [_write()][stream-_write], [_writev()][stream-_writev], [_final()][stream-_final] | -| Reading and writing | [`Duplex`] | [_read()][stream-_read], [_write()][stream-_write], [_writev()][stream-_writev], [_final()][stream-_final] | -| Operate on written data, then read the result | [`Transform`] | [_transform()][stream-_transform], [_flush()][stream-_flush], [_final()][stream-_final] | +| Use-case | Class | Method(s) to implement | +| --------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Reading only | [`Readable`] | [\_read()][stream-_read] | +| Writing only | [`Writable`] | [\_write()][stream-_write], [\_writev()][stream-_writev], [\_final()][stream-_final] | +| Reading and writing | [`Duplex`] | [\_read()][stream-_read], [\_write()][stream-_write], [\_writev()][stream-_writev], 
[\_final()][stream-_final] | +| Operate on written data, then read the result | [`Transform`] | [\_transform()][stream-_transform], [\_flush()][stream-_flush], [\_final()][stream-_final] | -The implementation code for a stream should *never* call the "public" methods -of a stream that are intended for use by consumers (as described in the -[API for Stream Consumers][] section). Doing so may lead to adverse side effects -in application code consuming the stream. +The implementation code for a stream should _never_ call the "public" methods of +a stream that are intended for use by consumers (as described in the [API for +Stream Consumers][] section). Doing so may lead to adverse side effects in +application code consuming the stream. ### Simplified Construction + @@ -1653,7 +1697,7 @@ inheritance. This can be accomplished by directly creating instances of the objects and passing appropriate methods as constructor options. ```js -const { Writable } = require('stream'); +const { Writable } = require("stream"); const myWritable = new Writable({ write(chunk, encoding, callback) { @@ -1666,11 +1710,12 @@ const myWritable = new Writable({ The `stream.Writable` class is extended to implement a [`Writable`][] stream. -Custom `Writable` streams *must* call the `new stream.Writable([options])` +Custom `Writable` streams _must_ call the `new stream.Writable([options])` constructor and implement the `writable._write()` method. The -`writable._writev()` method *may* also be implemented. +`writable._writev()` method _may_ also be implemented. #### Constructor: new stream.Writable([options]) + -* `options` {Object} - * `highWaterMark` {number} Buffer level when - [`stream.write()`][stream-write] starts returning `false`. **Default:** - `16384` (16kb), or `16` for `objectMode` streams. 
- * `decodeStrings` {boolean} Whether to encode `string`s passed to - [`stream.write()`][stream-write] to `Buffer`s (with the encoding - specified in the [`stream.write()`][stream-write] call) before passing - them to [`stream._write()`][stream-_write]. Other types of data are not - converted (i.e. `Buffer`s are not decoded into `string`s). Setting to - false will prevent `string`s from being converted. **Default:** `true`. - * `defaultEncoding` {string} The default encoding that is used when no +- `options` {Object} + - `highWaterMark` {number} Buffer level when [`stream.write()`][stream-write] + starts returning `false`. **Default:** `16384` (16kb), or `16` for + `objectMode` streams. + - `decodeStrings` {boolean} Whether to encode `string`s passed to + [`stream.write()`][stream-write] to `Buffer`s (with the encoding specified + in the [`stream.write()`][stream-write] call) before passing them to + [`stream._write()`][stream-_write]. Other types of data are not converted + (i.e. `Buffer`s are not decoded into `string`s). Setting to false will + prevent `string`s from being converted. **Default:** `true`. + - `defaultEncoding` {string} The default encoding that is used when no encoding is specified as an argument to [`stream.write()`][stream-write]. **Default:** `'utf8'`. - * `objectMode` {boolean} Whether or not the - [`stream.write(anyObj)`][stream-write] is a valid operation. When set, - it becomes possible to write JavaScript values other than string, - `Buffer` or `Uint8Array` if supported by the stream implementation. - **Default:** `false`. - * `emitClose` {boolean} Whether or not the stream should emit `'close'` - after it has been destroyed. **Default:** `true`. - * `write` {Function} Implementation for the - [`stream._write()`][stream-_write] method. - * `writev` {Function} Implementation for the + - `objectMode` {boolean} Whether or not the + [`stream.write(anyObj)`][stream-write] is a valid operation. 
When set, it + becomes possible to write JavaScript values other than string, `Buffer` or + `Uint8Array` if supported by the stream implementation. **Default:** + `false`. + - `emitClose` {boolean} Whether or not the stream should emit `'close'` after + it has been destroyed. **Default:** `true`. + - `write` {Function} Implementation for the [`stream._write()`][stream-_write] + method. + - `writev` {Function} Implementation for the [`stream._writev()`][stream-_writev] method. - * `destroy` {Function} Implementation for the + - `destroy` {Function} Implementation for the [`stream._destroy()`][writable-_destroy] method. - * `final` {Function} Implementation for the - [`stream._final()`][stream-_final] method. - * `autoDestroy` {boolean} Whether this stream should automatically call + - `final` {Function} Implementation for the [`stream._final()`][stream-_final] + method. + - `autoDestroy` {boolean} Whether this stream should automatically call `.destroy()` on itself after ending. **Default:** `false`. 
+ ```js -const { Writable } = require('stream'); +const { Writable } = require("stream"); class MyWritable extends Writable { constructor(options) { @@ -1730,12 +1776,11 @@ class MyWritable extends Writable { Or, when using pre-ES6 style constructors: ```js -const { Writable } = require('stream'); -const util = require('util'); +const { Writable } = require("stream"); +const util = require("util"); function MyWritable(options) { - if (!(this instanceof MyWritable)) - return new MyWritable(options); + if (!(this instanceof MyWritable)) return new MyWritable(options); Writable.call(this, options); } util.inherits(MyWritable, Writable); @@ -1744,7 +1789,7 @@ util.inherits(MyWritable, Writable); Or, using the Simplified Constructor approach: ```js -const { Writable } = require('stream'); +const { Writable } = require("stream"); const myWritable = new Writable({ write(chunk, encoding, callback) { @@ -1758,16 +1803,16 @@ const myWritable = new Writable({ #### writable.\_write(chunk, encoding, callback) -* `chunk` {Buffer|string|any} The `Buffer` to be written, converted from the +- `chunk` {Buffer|string|any} The `Buffer` to be written, converted from the `string` passed to [`stream.write()`][stream-write]. If the stream's `decodeStrings` option is `false` or the stream is operating in object mode, the chunk will not be converted & will be whatever was passed to [`stream.write()`][stream-write]. -* `encoding` {string} If the chunk is a string, then `encoding` is the - character encoding of that string. If chunk is a `Buffer`, or if the - stream is operating in object mode, `encoding` may be ignored. -* `callback` {Function} Call this function (optionally with an error - argument) when processing is complete for the supplied chunk. +- `encoding` {string} If the chunk is a string, then `encoding` is the character + encoding of that string. If chunk is a `Buffer`, or if the stream is operating + in object mode, `encoding` may be ignored. 
+- `callback` {Function} Call this function (optionally with an error argument) + when processing is complete for the supplied chunk. All `Writable` stream implementations must provide a [`writable._write()`][stream-_write] method to send data to the underlying @@ -1782,8 +1827,8 @@ methods only. The `callback` method must be called to signal either that the write completed successfully or failed with an error. The first argument passed to the -`callback` must be the `Error` object if the call failed or `null` if the -write succeeded. +`callback` must be the `Error` object if the call failed or `null` if the write +succeeded. All calls to `writable.write()` that occur between the time `writable._write()` is called and the `callback` is called will cause the written data to be @@ -1804,10 +1849,10 @@ user programs. #### writable.\_writev(chunks, callback) -* `chunks` {Object[]} The chunks to be written. Each chunk has following - format: `{ chunk: ..., encoding: ... }`. -* `callback` {Function} A callback function (optionally with an error - argument) to be invoked when processing is complete for the supplied chunks. +- `chunks` {Object[]} The chunks to be written. Each chunk has following format: + `{ chunk: ..., encoding: ... }`. +- `callback` {Function} A callback function (optionally with an error argument) + to be invoked when processing is complete for the supplied chunks. This function MUST NOT be called by application code directly. It should be implemented by child classes, and called by the internal `Writable` class @@ -1823,28 +1868,30 @@ internal to the class that defines it, and should never be called directly by user programs. #### writable.\_destroy(err, callback) + -* `err` {Error} A possible error. -* `callback` {Function} A callback function that takes an optional error +- `err` {Error} A possible error. +- `callback` {Function} A callback function that takes an optional error argument. 
The `_destroy()` method is called by [`writable.destroy()`][writable-destroy]. It can be overridden by child classes but it **must not** be called directly. #### writable.\_final(callback) + -* `callback` {Function} Call this function (optionally with an error - argument) when finished writing any remaining data. +- `callback` {Function} Call this function (optionally with an error argument) + when finished writing any remaining data. -The `_final()` method **must not** be called directly. It may be implemented -by child classes, and if so, will be called by the internal `Writable` -class methods only. +The `_final()` method **must not** be called directly. It may be implemented by +child classes, and if so, will be called by the internal `Writable` class +methods only. This optional function will be called before the stream closes, delaying the `'finish'` event until `callback` is called. This is useful to close resources @@ -1864,12 +1911,12 @@ If a `Readable` stream pipes into a `Writable` stream when `Writable` emits an error, the `Readable` stream will be unpiped. 
```js -const { Writable } = require('stream'); +const { Writable } = require("stream"); const myWritable = new Writable({ write(chunk, encoding, callback) { - if (chunk.toString().indexOf('a') >= 0) { - callback(new Error('chunk is invalid')); + if (chunk.toString().indexOf("a") >= 0) { + callback(new Error("chunk is invalid")); } else { callback(); } @@ -1885,12 +1932,12 @@ is not of any real particular usefulness, the example illustrates each of the required elements of a custom [`Writable`][] stream instance: ```js -const { Writable } = require('stream'); +const { Writable } = require("stream"); class MyWritable extends Writable { _write(chunk, encoding, callback) { - if (chunk.toString().indexOf('a') >= 0) { - callback(new Error('chunk is invalid')); + if (chunk.toString().indexOf("a") >= 0) { + callback(new Error("chunk is invalid")); } else { callback(); } @@ -1906,17 +1953,17 @@ characters encoding, such as UTF-8. The following example shows how to decode multi-byte strings using `StringDecoder` and [`Writable`][]. 
```js -const { Writable } = require('stream'); -const { StringDecoder } = require('string_decoder'); +const { Writable } = require("stream"); +const { StringDecoder } = require("string_decoder"); class StringWritable extends Writable { constructor(options) { super(options); this._decoder = new StringDecoder(options && options.defaultEncoding); - this.data = ''; + this.data = ""; } _write(chunk, encoding, callback) { - if (encoding === 'buffer') { + if (encoding === "buffer") { chunk = this._decoder.write(chunk); } this.data += chunk; @@ -1928,10 +1975,10 @@ class StringWritable extends Writable { } } -const euro = [[0xE2, 0x82], [0xAC]].map(Buffer.from); +const euro = [[0xe2, 0x82], [0xac]].map(Buffer.from); const w = new StringWritable(); -w.write('currency: '); +w.write("currency: "); w.write(euro[0]); w.end(euro[1]); @@ -1942,10 +1989,11 @@ console.log(w.data); // currency: € The `stream.Readable` class is extended to implement a [`Readable`][] stream. -Custom `Readable` streams *must* call the `new stream.Readable([options])` +Custom `Readable` streams _must_ call the `new stream.Readable([options])` constructor and implement the `readable._read()` method. #### new stream.Readable([options]) + -* `options` {Object} - * `highWaterMark` {number} The maximum [number of bytes][hwm-gotcha] to store +- `options` {Object} + - `highWaterMark` {number} The maximum [number of bytes][hwm-gotcha] to store in the internal buffer before ceasing to read from the underlying resource. **Default:** `16384` (16kb), or `16` for `objectMode` streams. - * `encoding` {string} If specified, then buffers will be decoded to - strings using the specified encoding. **Default:** `null`. - * `objectMode` {boolean} Whether this stream should behave - as a stream of objects. Meaning that [`stream.read(n)`][stream-read] returns - a single value instead of a `Buffer` of size `n`. **Default:** `false`. 
- * `emitClose` {boolean} Whether or not the stream should emit `'close'` - after it has been destroyed. **Default:** `true`. - * `read` {Function} Implementation for the [`stream._read()`][stream-_read] + - `encoding` {string} If specified, then buffers will be decoded to strings + using the specified encoding. **Default:** `null`. + - `objectMode` {boolean} Whether this stream should behave as a stream of + objects. Meaning that [`stream.read(n)`][stream-read] returns a single value + instead of a `Buffer` of size `n`. **Default:** `false`. + - `emitClose` {boolean} Whether or not the stream should emit `'close'` after + it has been destroyed. **Default:** `true`. + - `read` {Function} Implementation for the [`stream._read()`][stream-_read] method. - * `destroy` {Function} Implementation for the + - `destroy` {Function} Implementation for the [`stream._destroy()`][readable-_destroy] method. - * `autoDestroy` {boolean} Whether this stream should automatically call + - `autoDestroy` {boolean} Whether this stream should automatically call `.destroy()` on itself after ending. **Default:** `false`. 
+ ```js -const { Readable } = require('stream'); +const { Readable } = require("stream"); class MyReadable extends Readable { constructor(options) { @@ -1988,12 +2037,11 @@ class MyReadable extends Readable { Or, when using pre-ES6 style constructors: ```js -const { Readable } = require('stream'); -const util = require('util'); +const { Readable } = require("stream"); +const util = require("util"); function MyReadable(options) { - if (!(this instanceof MyReadable)) - return new MyReadable(options); + if (!(this instanceof MyReadable)) return new MyReadable(options); Readable.call(this, options); } util.inherits(MyReadable, Readable); @@ -2002,7 +2050,7 @@ util.inherits(MyReadable, Readable); Or, using the Simplified Constructor approach: ```js -const { Readable } = require('stream'); +const { Readable } = require("stream"); const myReadable = new Readable({ read(size) { @@ -2012,11 +2060,12 @@ const myReadable = new Readable({ ``` #### readable.\_read(size) + -* `size` {number} Number of bytes to read asynchronously +- `size` {number} Number of bytes to read asynchronously This function MUST NOT be called by application code directly. It should be implemented by child classes, and called by the internal `Readable` class @@ -2035,29 +2084,31 @@ additional data onto the queue. Once the `readable._read()` method has been called, it will not be called again until the [`readable.push()`][stream-push] method is called. -The `size` argument is advisory. For implementations where a "read" is a -single operation that returns data can use the `size` argument to determine how -much data to fetch. Other implementations may ignore this argument and simply -provide data whenever it becomes available. There is no need to "wait" until -`size` bytes are available before calling [`stream.push(chunk)`][stream-push]. +The `size` argument is advisory. 
Implementations where a "read" is a single +operation that returns data can use the `size` argument to determine how much +data to fetch. Other implementations may ignore this argument and simply provide +data whenever it becomes available. There is no need to "wait" until `size` +bytes are available before calling [`stream.push(chunk)`][stream-push]. The `readable._read()` method is prefixed with an underscore because it is internal to the class that defines it, and should never be called directly by user programs. #### readable.\_destroy(err, callback) + -* `err` {Error} A possible error. -* `callback` {Function} A callback function that takes an optional error +- `err` {Error} A possible error. +- `callback` {Function} A callback function that takes an optional error argument. The `_destroy()` method is called by [`readable.destroy()`][readable-destroy]. It can be overridden by child classes but it **must not** be called directly. #### readable.push(chunk[, encoding]) + -* `chunk` {Buffer|Uint8Array|string|null|any} Chunk of data to push into the +- `chunk` {Buffer|Uint8Array|string|null|any} Chunk of data to push into the read queue. For streams not operating in object mode, `chunk` must be a - string, `Buffer` or `Uint8Array`. For object mode streams, `chunk` may be - any JavaScript value. -* `encoding` {string} Encoding of string chunks. Must be a valid - `Buffer` encoding, such as `'utf8'` or `'ascii'`. -* Returns: {boolean} `true` if additional chunks of data may continue to be + string, `Buffer` or `Uint8Array`. For object mode streams, `chunk` may be any + JavaScript value. +- `encoding` {string} Encoding of string chunks. Must be a valid `Buffer` + encoding, such as `'utf8'` or `'ascii'`. +- Returns: {boolean} `true` if additional chunks of data may continue to be pushed; `false` otherwise. When `chunk` is a `Buffer`, `Uint8Array` or `string`, the `chunk` of data will -be added to the internal queue for users of the stream to consume. 
-Passing `chunk` as `null` signals the end of the stream (EOF), after which no -more data can be written. +be added to the internal queue for users of the stream to consume. Passing +`chunk` as `null` signals the end of the stream (EOF), after which no more data +can be written. When the `Readable` is operating in paused mode, the data added with `readable.push()` can be read out by calling the @@ -2104,10 +2155,9 @@ class SourceWrapper extends Readable { this._source = getLowLevelSourceObject(); // Every time there's data, push it into the internal buffer. - this._source.ondata = (chunk) => { + this._source.ondata = chunk => { // If push() returns false, then stop reading from source. - if (!this.push(chunk)) - this._source.readStop(); + if (!this.push(chunk)) this._source.readStop(); }; // When the source ends, push the EOF-signaling `null` chunk. @@ -2127,8 +2177,8 @@ The `readable.push()` method is intended be called only by `Readable` implementers, and only from within the `readable._read()` method. For streams not operating in object mode, if the `chunk` parameter of -`readable.push()` is `undefined`, it will be treated as empty string or -buffer. See [`readable.push('')`][] for more information. +`readable.push()` is `undefined`, it will be treated as empty string or buffer. +See [`readable.push('')`][] for more information. #### Errors While Reading @@ -2140,13 +2190,14 @@ operating in flowing or paused mode. Using the `'error'` event ensures consistent and predictable handling of errors. + ```js -const { Readable } = require('stream'); +const { Readable } = require("stream"); const myReadable = new Readable({ read(size) { if (checkSomeErrorCondition()) { - process.nextTick(() => this.emit('error', err)); + process.nextTick(() => this.emit("error", err)); return; } // Do some work. @@ -2162,7 +2213,7 @@ The following is a basic example of a `Readable` stream that emits the numerals from 1 to 1,000,000 in ascending order, and then ends. 
```js -const { Readable } = require('stream'); +const { Readable } = require("stream"); class Counter extends Readable { constructor(opt) { @@ -2173,11 +2224,10 @@ class Counter extends Readable { _read() { const i = this._index++; - if (i > this._max) - this.push(null); + if (i > this._max) this.push(null); else { const str = String(i); - const buf = Buffer.from(str, 'ascii'); + const buf = Buffer.from(str, "ascii"); this.push(buf); } } @@ -2191,18 +2241,19 @@ A [`Duplex`][] stream is one that implements both [`Readable`][] and Because JavaScript does not have support for multiple inheritance, the `stream.Duplex` class is extended to implement a [`Duplex`][] stream (as opposed -to extending the `stream.Readable` *and* `stream.Writable` classes). +to extending the `stream.Readable` _and_ `stream.Writable` classes). The `stream.Duplex` class prototypically inherits from `stream.Readable` and parasitically from `stream.Writable`, but `instanceof` will work properly for both base classes due to overriding [`Symbol.hasInstance`][] on `stream.Writable`. -Custom `Duplex` streams *must* call the `new stream.Duplex([options])` -constructor and implement *both* the `readable._read()` and -`writable._write()` methods. +Custom `Duplex` streams _must_ call the `new stream.Duplex([options])` +constructor and implement _both_ the `readable._read()` and `writable._write()` +methods. #### new stream.Duplex(options) + -* `options` {Object} Passed to both `Writable` and `Readable` - constructors. Also has the following fields: - * `allowHalfOpen` {boolean} If set to `false`, then the stream will +- `options` {Object} Passed to both `Writable` and `Readable` constructors. Also + has the following fields: + - `allowHalfOpen` {boolean} If set to `false`, then the stream will automatically end the writable side when the readable side ends. **Default:** `true`. 
- * `readableObjectMode` {boolean} Sets `objectMode` for readable side of the + - `readableObjectMode` {boolean} Sets `objectMode` for readable side of the stream. Has no effect if `objectMode` is `true`. **Default:** `false`. - * `writableObjectMode` {boolean} Sets `objectMode` for writable side of the + - `writableObjectMode` {boolean} Sets `objectMode` for writable side of the stream. Has no effect if `objectMode` is `true`. **Default:** `false`. - * `readableHighWaterMark` {number} Sets `highWaterMark` for the readable side + - `readableHighWaterMark` {number} Sets `highWaterMark` for the readable side of the stream. Has no effect if `highWaterMark` is provided. - * `writableHighWaterMark` {number} Sets `highWaterMark` for the writable side + - `writableHighWaterMark` {number} Sets `highWaterMark` for the writable side of the stream. Has no effect if `highWaterMark` is provided. + ```js -const { Duplex } = require('stream'); +const { Duplex } = require("stream"); class MyDuplex extends Duplex { constructor(options) { @@ -2240,12 +2292,11 @@ class MyDuplex extends Duplex { Or, when using pre-ES6 style constructors: ```js -const { Duplex } = require('stream'); -const util = require('util'); +const { Duplex } = require("stream"); +const util = require("util"); function MyDuplex(options) { - if (!(this instanceof MyDuplex)) - return new MyDuplex(options); + if (!(this instanceof MyDuplex)) return new MyDuplex(options); Duplex.call(this, options); } util.inherits(MyDuplex, Duplex); @@ -2254,7 +2305,7 @@ util.inherits(MyDuplex, Duplex); Or, using the Simplified Constructor approach: ```js -const { Duplex } = require('stream'); +const { Duplex } = require("stream"); const myDuplex = new Duplex({ read(size) { @@ -2269,16 +2320,15 @@ const myDuplex = new Duplex({ #### An Example Duplex Stream The following illustrates a simple example of a `Duplex` stream that wraps a -hypothetical lower-level source object to which data can be written, and -from which data can be read, 
albeit using an API that is not compatible with -Node.js streams. -The following illustrates a simple example of a `Duplex` stream that buffers -incoming written data via the [`Writable`][] interface that is read back out -via the [`Readable`][] interface. +hypothetical lower-level source object to which data can be written, and from +which data can be read, albeit using an API that is not compatible with Node.js +streams. The following illustrates a simple example of a `Duplex` stream that +buffers incoming written data via the [`Writable`][] interface that is read back +out via the [`Readable`][] interface. ```js -const { Duplex } = require('stream'); -const kSource = Symbol('source'); +const { Duplex } = require("stream"); +const kSource = Symbol("source"); class MyDuplex extends Duplex { constructor(source, options) { @@ -2288,8 +2338,7 @@ class MyDuplex extends Duplex { _write(chunk, encoding, callback) { // The underlying source only deals with strings. - if (Buffer.isBuffer(chunk)) - chunk = chunk.toString(); + if (Buffer.isBuffer(chunk)) chunk = chunk.toString(); this[kSource].writeSomeData(chunk); callback(); } @@ -2314,11 +2363,11 @@ For `Duplex` streams, `objectMode` can be set exclusively for either the In the following example, for instance, a new `Transform` stream (which is a type of [`Duplex`][] stream) is created that has an object mode `Writable` side -that accepts JavaScript numbers that are converted to hexadecimal strings on -the `Readable` side. +that accepts JavaScript numbers that are converted to hexadecimal strings on the +`Readable` side. ```js -const { Transform } = require('stream'); +const { Transform } = require("stream"); // All Transform streams are also Duplex Streams. const myTransform = new Transform({ @@ -2332,12 +2381,12 @@ const myTransform = new Transform({ const data = chunk.toString(16); // Push the data onto the readable queue. 
- callback(null, '0'.repeat(data.length % 2) + data); + callback(null, "0".repeat(data.length % 2) + data); } }); -myTransform.setEncoding('ascii'); -myTransform.on('data', (chunk) => console.log(chunk)); +myTransform.setEncoding("ascii"); +myTransform.on("data", chunk => console.log(chunk)); myTransform.write(1); // Prints: 01 @@ -2363,8 +2412,8 @@ The `stream.Transform` class is extended to implement a [`Transform`][] stream. The `stream.Transform` class prototypically inherits from `stream.Duplex` and implements its own versions of the `writable._write()` and `readable._read()` -methods. Custom `Transform` implementations *must* implement the -[`transform._transform()`][stream-_transform] method and *may* also implement +methods. Custom `Transform` implementations _must_ implement the +[`transform._transform()`][stream-_transform] method and _may_ also implement the [`transform._flush()`][stream-_flush] method. Care must be taken when using `Transform` streams in that data written to the @@ -2373,16 +2422,17 @@ output on the `Readable` side is not consumed. #### new stream.Transform([options]) -* `options` {Object} Passed to both `Writable` and `Readable` - constructors. Also has the following fields: - * `transform` {Function} Implementation for the +- `options` {Object} Passed to both `Writable` and `Readable` constructors. Also + has the following fields: + - `transform` {Function} Implementation for the [`stream._transform()`][stream-_transform] method. - * `flush` {Function} Implementation for the [`stream._flush()`][stream-_flush] + - `flush` {Function} Implementation for the [`stream._flush()`][stream-_flush] method. 
+ ```js -const { Transform } = require('stream'); +const { Transform } = require("stream"); class MyTransform extends Transform { constructor(options) { @@ -2395,12 +2445,11 @@ class MyTransform extends Transform { Or, when using pre-ES6 style constructors: ```js -const { Transform } = require('stream'); -const util = require('util'); +const { Transform } = require("stream"); +const util = require("util"); function MyTransform(options) { - if (!(this instanceof MyTransform)) - return new MyTransform(options); + if (!(this instanceof MyTransform)) return new MyTransform(options); Transform.call(this, options); } util.inherits(MyTransform, Transform); @@ -2409,7 +2458,7 @@ util.inherits(MyTransform, Transform); Or, using the Simplified Constructor approach: ```js -const { Transform } = require('stream'); +const { Transform } = require("stream"); const myTransform = new Transform({ transform(chunk, encoding, callback) { @@ -2420,37 +2469,37 @@ const myTransform = new Transform({ #### Events: 'finish' and 'end' -The [`'finish'`][] and [`'end'`][] events are from the `stream.Writable` -and `stream.Readable` classes, respectively. The `'finish'` event is emitted -after [`stream.end()`][stream-end] is called and all chunks have been processed -by [`stream._transform()`][stream-_transform]. The `'end'` event is emitted -after all data has been output, which occurs after the callback in +The [`'finish'`][] and [`'end'`][] events are from the `stream.Writable` and +`stream.Readable` classes, respectively. The `'finish'` event is emitted after +[`stream.end()`][stream-end] is called and all chunks have been processed by +[`stream._transform()`][stream-_transform]. The `'end'` event is emitted after +all data has been output, which occurs after the callback in [`transform._flush()`][stream-_flush] has been called. In the case of an error, neither `'finish'` nor `'end'` should be emitted. 
#### transform.\_flush(callback) -* `callback` {Function} A callback function (optionally with an error - argument and data) to be called when remaining data has been flushed. +- `callback` {Function} A callback function (optionally with an error argument + and data) to be called when remaining data has been flushed. This function MUST NOT be called by application code directly. It should be implemented by child classes, and called by the internal `Readable` class methods only. -In some cases, a transform operation may need to emit an additional bit of -data at the end of the stream. For example, a `zlib` compression stream will -store an amount of internal state used to optimally compress the output. When -the stream ends, however, that additional data needs to be flushed so that the -compressed data will be complete. +In some cases, a transform operation may need to emit an additional bit of data +at the end of the stream. For example, a `zlib` compression stream will store an +amount of internal state used to optimally compress the output. When the stream +ends, however, that additional data needs to be flushed so that the compressed +data will be complete. -Custom [`Transform`][] implementations *may* implement the `transform._flush()` +Custom [`Transform`][] implementations _may_ implement the `transform._flush()` method. This will be called when there is no more written data to be consumed, but before the [`'end'`][] event is emitted signaling the end of the [`Readable`][] stream. -Within the `transform._flush()` implementation, the `readable.push()` method -may be called zero or more times, as appropriate. The `callback` function must -be called when the flush operation is complete. +Within the `transform._flush()` implementation, the `readable.push()` method may +be called zero or more times, as appropriate. The `callback` function must be +called when the flush operation is complete. 
The `transform._flush()` method is prefixed with an underscore because it is internal to the class that defines it, and should never be called directly by @@ -2458,30 +2507,29 @@ user programs. #### transform.\_transform(chunk, encoding, callback) -* `chunk` {Buffer|string|any} The `Buffer` to be transformed, converted from - the `string` passed to [`stream.write()`][stream-write]. If the stream's +- `chunk` {Buffer|string|any} The `Buffer` to be transformed, converted from the + `string` passed to [`stream.write()`][stream-write]. If the stream's `decodeStrings` option is `false` or the stream is operating in object mode, the chunk will not be converted & will be whatever was passed to [`stream.write()`][stream-write]. -* `encoding` {string} If the chunk is a string, then this is the - encoding type. If chunk is a buffer, then this is the special - value - `'buffer'`, ignore it in this case. -* `callback` {Function} A callback function (optionally with an error - argument and data) to be called after the supplied `chunk` has been - processed. +- `encoding` {string} If the chunk is a string, then this is the encoding type. + If chunk is a buffer, then this is the special value - `'buffer'`, ignore it + in this case. +- `callback` {Function} A callback function (optionally with an error argument + and data) to be called after the supplied `chunk` has been processed. This function MUST NOT be called by application code directly. It should be implemented by child classes, and called by the internal `Readable` class methods only. -All `Transform` stream implementations must provide a `_transform()` -method to accept input and produce output. The `transform._transform()` -implementation handles the bytes being written, computes an output, then passes -that output off to the readable portion using the `readable.push()` method. +All `Transform` stream implementations must provide a `_transform()` method to +accept input and produce output. 
The `transform._transform()` implementation +handles the bytes being written, computes an output, then passes that output off +to the readable portion using the `readable.push()` method. The `transform.push()` method may be called zero or more times to generate -output from a single input chunk, depending on how much is to be output -as a result of the chunk. +output from a single input chunk, depending on how much is to be output as a +result of the chunk. It is possible that no output is generated from any given chunk of input data. @@ -2502,13 +2550,13 @@ transform.prototype._transform = function(data, encoding, callback) { }; ``` -The `transform._transform()` method is prefixed with an underscore because it -is internal to the class that defines it, and should never be called directly by +The `transform._transform()` method is prefixed with an underscore because it is +internal to the class that defines it, and should never be called directly by user programs. -`transform._transform()` is never called in parallel; streams implement a -queue mechanism, and to receive the next chunk, `callback` must be -called, either synchronously or asynchronously. +`transform._transform()` is never called in parallel; streams implement a queue +mechanism, and to receive the next chunk, `callback` must be called, either +synchronously or asynchronously. #### Class: stream.PassThrough @@ -2524,11 +2572,11 @@ primarily for examples and testing, but there are some use cases where ### Streams Compatibility with Async Generators and Async Iterators With the support of async generators and iterators in JavaScript, async -generators are effectively a first-class language-level stream construct at -this point. +generators are effectively a first-class language-level stream construct at this +point. -Some common interop cases of using Node.js streams with async generators -and async iterators are provided below. 
+Some common interop cases of using Node.js streams with async generators and +async iterators are provided below. #### Consuming Readable Streams with Async Iterators @@ -2545,41 +2593,40 @@ unhandled post-destroy errors. #### Creating Readable Streams with Async Generators -We can construct a Node.js Readable Stream from an asynchronous generator -using the `Readable.from()` utility method: +We can construct a Node.js Readable Stream from an asynchronous generator using +the `Readable.from()` utility method: ```js -const { Readable } = require('stream'); +const { Readable } = require("stream"); -async function * generate() { - yield 'a'; - yield 'b'; - yield 'c'; +async function* generate() { + yield "a"; + yield "b"; + yield "c"; } const readable = Readable.from(generate()); -readable.on('data', (chunk) => { +readable.on("data", chunk => { console.log(chunk); }); ``` #### Piping to Writable Streams from Async Iterators -In the scenario of writing to a writable stream from an async iterator, -it is important to ensure the correct handling of backpressure and errors. +In the scenario of writing to a writable stream from an async iterator, it is +important to ensure the correct handling of backpressure and errors. ```js -const { once } = require('events'); +const { once } = require("events"); const finished = util.promisify(stream.finished); -const writable = fs.createWriteStream('./file'); +const writable = fs.createWriteStream("./file"); (async function() { for await (const chunk of iterator) { // Handle backpressure on write(). - if (!writable.write(chunk)) - await once(writable, 'drain'); + if (!writable.write(chunk)) await once(writable, "drain"); } writable.end(); // Ensure completion without errors. @@ -2587,14 +2634,14 @@ const writable = fs.createWriteStream('./file'); })(); ``` -In the above, errors on `write()` would be caught and thrown by the -`once()` listener for the `'drain'` event, since `once()` will also handle the -`'error'` event. 
To ensure completion of the write stream without errors, -it is safer to use the `finished()` method as above, instead of using the -`once()` listener for the `'finish'` event. Under certain cases, an `'error'` -event could be emitted by the writable stream after `'finish'` and as `once()` -will release the `'error'` handler on handling the `'finish'` event, it could -result in an unhandled error. +In the above, errors on `write()` would be caught and thrown by the `once()` +listener for the `'drain'` event, since `once()` will also handle the `'error'` +event. To ensure completion of the write stream without errors, it is safer to +use the `finished()` method as above, instead of using the `once()` listener for +the `'finish'` event. Under certain cases, an `'error'` event could be emitted +by the writable stream after `'finish'` and as `once()` will release the +`'error'` handler on handling the `'finish'` event, it could result in an +unhandled error. Alternatively, the readable stream could be wrapped with `Readable.from()` and then piped via `.pipe()`: @@ -2602,7 +2649,7 @@ then piped via `.pipe()`: ```js const finished = util.promisify(stream.finished); -const writable = fs.createWriteStream('./file'); +const writable = fs.createWriteStream("./file"); (async function() { const readable = Readable.from(iterator); @@ -2617,7 +2664,7 @@ Or, using `stream.pipeline()` to pipe streams: ```js const pipeline = util.promisify(stream.pipeline); -const writable = fs.createWriteStream('./file'); +const writable = fs.createWriteStream("./file"); (async function() { const readable = Readable.from(iterator); @@ -2634,17 +2681,17 @@ const writable = fs.createWriteStream('./file'); Prior to Node.js 0.10, the `Readable` stream interface was simpler, but also less powerful and less useful. -* Rather than waiting for calls to the [`stream.read()`][stream-read] method, - [`'data'`][] events would begin emitting immediately. 
Applications that - would need to perform some amount of work to decide how to handle data - were required to store read data into buffers so the data would not be lost. -* The [`stream.pause()`][stream-pause] method was advisory, rather than +- Rather than waiting for calls to the [`stream.read()`][stream-read] method, + [`'data'`][] events would begin emitting immediately. Applications that would + need to perform some amount of work to decide how to handle data were required + to store read data into buffers so the data would not be lost. +- The [`stream.pause()`][stream-pause] method was advisory, rather than guaranteed. This meant that it was still necessary to be prepared to receive - [`'data'`][] events *even when the stream was in a paused state*. + [`'data'`][] events _even when the stream was in a paused state_. -In Node.js 0.10, the [`Readable`][] class was added. For backward -compatibility with older Node.js programs, `Readable` streams switch into -"flowing mode" when a [`'data'`][] event handler is added, or when the +In Node.js 0.10, the [`Readable`][] class was added. For backward compatibility +with older Node.js programs, `Readable` streams switch into "flowing mode" when +a [`'data'`][] event handler is added, or when the [`stream.resume()`][stream-resume] method is called. The effect is that, even when not using the new [`stream.read()`][stream-read] method and [`'readable'`][] event, it is no longer necessary to worry about losing @@ -2653,23 +2700,23 @@ when not using the new [`stream.read()`][stream-read] method and While most applications will continue to function normally, this introduces an edge case in the following conditions: -* No [`'data'`][] event listener is added. -* The [`stream.resume()`][stream-resume] method is never called. -* The stream is not piped to any writable destination. +- No [`'data'`][] event listener is added. +- The [`stream.resume()`][stream-resume] method is never called. 
+- The stream is not piped to any writable destination. For example, consider the following code: ```js // WARNING! BROKEN! -net.createServer((socket) => { - - // We add an 'end' listener, but never consume the data. - socket.on('end', () => { - // It will never get here. - socket.end('The message was received but was not processed.\n'); - }); - -}).listen(1337); +net + .createServer(socket => { + // We add an 'end' listener, but never consume the data. + socket.on("end", () => { + // It will never get here. + socket.end("The message was received but was not processed.\n"); + }); + }) + .listen(1337); ``` Prior to Node.js 0.10, the incoming message data would be simply discarded. @@ -2680,41 +2727,43 @@ The workaround in this situation is to call the ```js // Workaround. -net.createServer((socket) => { - socket.on('end', () => { - socket.end('The message was received but was not processed.\n'); - }); +net + .createServer(socket => { + socket.on("end", () => { + socket.end("The message was received but was not processed.\n"); + }); - // Start the flow of data, discarding it. - socket.resume(); -}).listen(1337); + // Start the flow of data, discarding it. + socket.resume(); + }) + .listen(1337); ``` -In addition to new `Readable` streams switching into flowing mode, -pre-0.10 style streams can be wrapped in a `Readable` class using the +In addition to new `Readable` streams switching into flowing mode, pre-0.10 +style streams can be wrapped in a `Readable` class using the [`readable.wrap()`][`stream.wrap()`] method. ### `readable.read(0)` There are some cases where it is necessary to trigger a refresh of the -underlying readable stream mechanisms, without actually consuming any -data. In such cases, it is possible to call `readable.read(0)`, which will -always return `null`. +underlying readable stream mechanisms, without actually consuming any data. In +such cases, it is possible to call `readable.read(0)`, which will always return +`null`. 
-If the internal read buffer is below the `highWaterMark`, and the -stream is not currently reading, then calling `stream.read(0)` will trigger -a low-level [`stream._read()`][stream-_read] call. +If the internal read buffer is below the `highWaterMark`, and the stream is not +currently reading, then calling `stream.read(0)` will trigger a low-level +[`stream._read()`][stream-_read] call. -While most applications will almost never need to do this, there are -situations within Node.js where this is done, particularly in the -`Readable` stream class internals. +While most applications will almost never need to do this, there are situations +within Node.js where this is done, particularly in the `Readable` stream class +internals. ### `readable.push('')` Use of `readable.push('')` is not recommended. Pushing a zero-byte string, `Buffer` or `Uint8Array` to a stream that is not in -object mode has an interesting side effect. Because it *is* a call to +object mode has an interesting side effect. Because it _is_ a call to [`readable.push()`][stream-push], the call will end the reading process. However, because the argument is an empty string, no data is added to the readable buffer so there is nothing for a user to consume. @@ -2737,21 +2786,22 @@ contain multi-byte characters. 
[`'end'`]: #stream_event_end [`'finish'`]: #stream_event_finish [`'readable'`]: #stream_event_readable -[`Duplex`]: #stream_class_stream_duplex -[`EventEmitter`]: events.html#events_class_eventemitter -[`Readable`]: #stream_class_stream_readable -[`Symbol.hasInstance`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Symbol/hasInstance -[`Transform`]: #stream_class_stream_transform -[`Writable`]: #stream_class_stream_writable -[`fs.createReadStream()`]: fs.html#fs_fs_createreadstream_path_options -[`fs.createWriteStream()`]: fs.html#fs_fs_createwritestream_path_options -[`net.Socket`]: net.html#net_class_net_socket +[`duplex`]: #stream_class_stream_duplex +[`eventemitter`]: events.html#events_class_eventemitter +[`readable`]: #stream_class_stream_readable +[`symbol.hasinstance`]: + https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Symbol/hasInstance +[`transform`]: #stream_class_stream_transform +[`writable`]: #stream_class_stream_writable +[`fs.createreadstream()`]: fs.html#fs_fs_createreadstream_path_options +[`fs.createwritestream()`]: fs.html#fs_fs_createwritestream_path_options +[`net.socket`]: net.html#net_class_net_socket [`process.stderr`]: process.html#process_process_stderr [`process.stdin`]: process.html#process_process_stdin [`process.stdout`]: process.html#process_process_stdout [`readable.push('')`]: #stream_readable_push -[`readable.setEncoding()`]: #stream_readable_setencoding_encoding -[`stream.Readable.from()`]: #stream_stream_readable_from_iterable_options +[`readable.setencoding()`]: #stream_readable_setencoding_encoding +[`stream.readable.from()`]: #stream_stream_readable_from_iterable_options [`stream.cork()`]: #stream_writable_cork [`stream.finished()`]: #stream_stream_finished_stream_options_callback [`stream.pipe()`]: #stream_readable_pipe_destination_options @@ -2762,21 +2812,23 @@ contain multi-byte characters. 
[`writable.cork()`]: #stream_writable_cork [`writable.end()`]: #stream_writable_end_chunk_encoding_callback [`writable.uncork()`]: #stream_writable_uncork -[`writable.writableFinished`]: #stream_writable_writablefinished -[`zlib.createDeflate()`]: zlib.html#zlib_zlib_createdeflate_options -[API for Stream Consumers]: #stream_api_for_stream_consumers -[API for Stream Implementers]: #stream_api_for_stream_implementers -[Compatibility]: #stream_compatibility_with_older_node_js_versions -[HTTP requests, on the client]: http.html#http_class_http_clientrequest -[HTTP responses, on the server]: http.html#http_class_http_serverresponse -[TCP sockets]: net.html#net_class_net_socket +[`writable.writablefinished`]: #stream_writable_writablefinished +[`zlib.createdeflate()`]: zlib.html#zlib_zlib_createdeflate_options +[api for stream consumers]: #stream_api_for_stream_consumers +[api for stream implementers]: #stream_api_for_stream_implementers +[compatibility]: #stream_compatibility_with_older_node_js_versions +[http requests, on the client]: http.html#http_class_http_clientrequest +[http responses, on the server]: http.html#http_class_http_serverresponse +[tcp sockets]: net.html#net_class_net_socket [child process stdin]: child_process.html#child_process_subprocess_stdin -[child process stdout and stderr]: child_process.html#child_process_subprocess_stdout +[child process stdout and stderr]: + child_process.html#child_process_subprocess_stdout [crypto]: crypto.html [fs read streams]: fs.html#fs_class_fs_readstream [fs write streams]: fs.html#fs_class_fs_writestream [http-incoming-message]: http.html#http_class_http_incomingmessage -[hwm-gotcha]: #stream_highwatermark_discrepancy_after_calling_readable_setencoding +[hwm-gotcha]: + #stream_highwatermark_discrepancy_after_calling_readable_setencoding [object-mode]: #stream_object_mode [readable-_destroy]: #stream_readable_destroy_err_callback [readable-destroy]: #stream_readable_destroy_error @@ -2792,7 +2844,7 @@ contain 
multi-byte characters. [stream-read]: #stream_readable_read_size [stream-resume]: #stream_readable_resume [stream-write]: #stream_writable_write_chunk_encoding_callback -[Stream Three States]: #stream_three_states +[stream three states]: #stream_three_states [writable-_destroy]: #stream_writable_destroy_err_callback [writable-destroy]: #stream_writable_destroy_error [writable-new]: #stream_constructor_new_stream_writable_options diff --git a/lib/_http_common.js b/lib/_http_common.js index 732f4f29c42758..414386edbad63c 100644 --- a/lib/_http_common.js +++ b/lib/_http_common.js @@ -19,24 +19,20 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. -'use strict'; +"use strict"; const { Math } = primordials; -const { setImmediate } = require('timers'); +const { setImmediate } = require("timers"); -const { methods, HTTPParser } = internalBinding('http_parser'); +const { methods, HTTPParser } = internalBinding("http_parser"); -const FreeList = require('internal/freelist'); -const incoming = require('_http_incoming'); -const { - IncomingMessage, - readStart, - readStop -} = incoming; +const FreeList = require("internal/freelist"); +const incoming = require("_http_incoming"); +const { IncomingMessage, readStart, readStop } = incoming; -const debug = require('internal/util/debuglog').debuglog('http'); +const debug = require("internal/util/debuglog").debuglog("http"); -const kIncomingMessage = Symbol('IncomingMessage'); +const kIncomingMessage = Symbol("IncomingMessage"); const kOnHeaders = HTTPParser.kOnHeaders | 0; const kOnHeadersComplete = HTTPParser.kOnHeadersComplete | 0; const kOnBody = HTTPParser.kOnBody | 0; @@ -52,8 +48,7 @@ const MAX_HEADER_PAIRS = 2000; // called to process trailing HTTP headers. 
function parserOnHeaders(headers, url) { // Once we exceeded headers limit - stop collecting them - if (this.maxHeaderPairs <= 0 || - this._headers.length < this.maxHeaderPairs) { + if (this.maxHeaderPairs <= 0 || this._headers.length < this.maxHeaderPairs) { this._headers = this._headers.concat(headers); } this._url += url; @@ -63,9 +58,17 @@ function parserOnHeaders(headers, url) { // this request. // `url` is not set for response parsers but that's not applicable here since // all our parsers are request parsers. -function parserOnHeadersComplete(versionMajor, versionMinor, headers, method, - url, statusCode, statusMessage, upgrade, - shouldKeepAlive) { +function parserOnHeadersComplete( + versionMajor, + versionMinor, + headers, + method, + url, + statusCode, + statusMessage, + upgrade, + shouldKeepAlive +) { const parser = this; const { socket } = parser; @@ -76,15 +79,15 @@ function parserOnHeadersComplete(versionMajor, versionMinor, headers, method, if (url === undefined) { url = parser._url; - parser._url = ''; + parser._url = ""; } // Parser is also used by http client - const ParserIncomingMessage = (socket && socket.server && - socket.server[kIncomingMessage]) || - IncomingMessage; + const ParserIncomingMessage = + (socket && socket.server && socket.server[kIncomingMessage]) || + IncomingMessage; - const incoming = parser.incoming = new ParserIncomingMessage(socket); + const incoming = (parser.incoming = new ParserIncomingMessage(socket)); incoming.httpVersionMajor = versionMajor; incoming.httpVersionMinor = versionMinor; incoming.httpVersion = `${versionMajor}.${versionMinor}`; @@ -94,12 +97,11 @@ function parserOnHeadersComplete(versionMajor, versionMinor, headers, method, var n = headers.length; // If parser.maxHeaderPairs <= 0 assume that there's no limit. 
- if (parser.maxHeaderPairs > 0) - n = Math.min(n, parser.maxHeaderPairs); + if (parser.maxHeaderPairs > 0) n = Math.min(n, parser.maxHeaderPairs); incoming._addHeaderLines(headers, n); - if (typeof method === 'number') { + if (typeof method === "number") { // server only incoming.method = methods[method]; } else { @@ -115,15 +117,13 @@ function parserOnBody(b, start, len) { const stream = this.incoming; // If the stream has already been removed, then drop it. - if (stream === null) - return; + if (stream === null) return; // Pretend this was the result of a stream._read call. if (len > 0 && !stream._dumped) { var slice = b.slice(start, start + len); var ret = stream.push(slice); - if (!ret) - readStop(this.socket); + if (!ret) readStop(this.socket); } } @@ -138,7 +138,7 @@ function parserOnMessageComplete() { if (headers.length) { stream._addHeaderLines(headers, headers.length); parser._headers = []; - parser._url = ''; + parser._url = ""; } // For emit end event @@ -149,8 +149,7 @@ function parserOnMessageComplete() { readStart(parser.socket); } - -const parsers = new FreeList('parsers', 1000, function parsersCb() { +const parsers = new FreeList("parsers", 1000, function parsersCb() { const parser = new HTTPParser(); cleanParser(parser); @@ -164,7 +163,9 @@ const parsers = new FreeList('parsers', 1000, function parsersCb() { return parser; }); -function closeParserInstance(parser) { parser.close(); } +function closeParserInstance(parser) { + parser.close(); +} // Free the parser and also break any links that it // might have to any other things. @@ -175,8 +176,7 @@ function closeParserInstance(parser) { parser.close(); } // should be all that is needed. 
function freeParser(parser, req, socket) { if (parser) { - if (parser._consumed) - parser.unconsume(); + if (parser._consumed) parser.unconsume(); cleanParser(parser); if (parsers.free(parser) === false) { // Make sure the parser's stack has unwound before deleting the @@ -219,7 +219,7 @@ function checkInvalidHeaderChar(val) { function cleanParser(parser) { parser._headers = []; - parser._url = ''; + parser._url = ""; parser.socket = null; parser.incoming = null; parser.outgoing = null; @@ -230,7 +230,7 @@ function cleanParser(parser) { function prepareError(err, parser, rawPacket) { err.rawPacket = rawPacket || parser.getCurrentBuffer(); - if (typeof err.reason === 'string') + if (typeof err.reason === "string") err.message = `Parse Error: ${err.reason}`; } @@ -239,12 +239,12 @@ module.exports = { _checkIsHttpToken: checkIsHttpToken, chunkExpression: /(?:^|\W)chunked(?:$|\W)/i, continueExpression: /(?:^|\W)100-continue(?:$|\W)/i, - CRLF: '\r\n', + CRLF: "\r\n", debug, freeParser, methods, parsers, kIncomingMessage, HTTPParser, - prepareError, + prepareError }; diff --git a/lib/_stream_writable.js b/lib/_stream_writable.js index 4cb3be5c008e9e..2d8c4a5e3f0c30 100644 --- a/lib/_stream_writable.js +++ b/lib/_stream_writable.js @@ -23,18 +23,18 @@ // Implement an async ._write(chunk, encoding, cb), and it'll handle all // the drain event emission and buffering. 
-'use strict'; +"use strict"; const { Object } = primordials; module.exports = Writable; Writable.WritableState = WritableState; -const internalUtil = require('internal/util'); -const Stream = require('stream'); -const { Buffer } = require('buffer'); -const destroyImpl = require('internal/streams/destroy'); -const { getHighWaterMark } = require('internal/streams/state'); +const internalUtil = require("internal/util"); +const Stream = require("stream"); +const { Buffer } = require("buffer"); +const destroyImpl = require("internal/streams/destroy"); +const { getHighWaterMark } = require("internal/streams/state"); const { ERR_INVALID_ARG_TYPE, ERR_METHOD_NOT_IMPLEMENTED, @@ -45,7 +45,7 @@ const { ERR_STREAM_NULL_VALUES, ERR_STREAM_WRITE_AFTER_END, ERR_UNKNOWN_ENCODING -} = require('internal/errors').codes; +} = require("internal/errors").codes; const { errorOrDestroy } = destroyImpl; @@ -62,8 +62,7 @@ function WritableState(options, stream, isDuplex) { // However, some cases require setting options to different // values for the readable and the writable sides of the duplex stream, // e.g. options.readableObjectMode vs. options.writableObjectMode, etc. - if (typeof isDuplex !== 'boolean') - isDuplex = stream instanceof Stream.Duplex; + if (typeof isDuplex !== "boolean") isDuplex = stream instanceof Stream.Duplex; // Object stream flag to indicate whether or not this stream // contains buffers or objects. 
@@ -75,8 +74,12 @@ function WritableState(options, stream, isDuplex) { // The point at which write() starts returning false // Note: 0 is a valid value, means that we always return false if // the entire buffer is not flushed immediately on write() - this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', - isDuplex); + this.highWaterMark = getHighWaterMark( + this, + options, + "writableHighWaterMark", + isDuplex + ); // if _final has been called this.finalCalled = false; @@ -102,7 +105,7 @@ function WritableState(options, stream, isDuplex) { // Crypto is kind of old and crusty. Historically, its default string // encoding is 'binary' so we have to make this configurable. // Everything else in the universe uses 'utf8', though. - this.defaultEncoding = options.defaultEncoding || 'utf8'; + this.defaultEncoding = options.defaultEncoding || "utf8"; // Not an actual buffer we keep track of, but a measurement // of how much we're waiting to get pushed to some underlying @@ -175,24 +178,26 @@ WritableState.prototype.getBuffer = function getBuffer() { return out; }; -Object.defineProperty(WritableState.prototype, 'buffer', { - get: internalUtil.deprecate(function writableStateBufferGetter() { - return this.getBuffer(); - }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + - 'instead.', 'DEP0003') +Object.defineProperty(WritableState.prototype, "buffer", { + get: internalUtil.deprecate( + function writableStateBufferGetter() { + return this.getBuffer(); + }, + "_writableState.buffer is deprecated. Use _writableState.getBuffer " + + "instead.", + "DEP0003" + ) }); // Test _writableState for inheritance to account for Duplex streams, // whose prototype chain only points to Readable. 
var realHasInstance; -if (typeof Symbol === 'function' && Symbol.hasInstance) { +if (typeof Symbol === "function" && Symbol.hasInstance) { realHasInstance = Function.prototype[Symbol.hasInstance]; Object.defineProperty(Writable, Symbol.hasInstance, { value: function(object) { - if (realHasInstance.call(this, object)) - return true; - if (this !== Writable) - return false; + if (realHasInstance.call(this, object)) return true; + if (this !== Writable) return false; return object && object._writableState instanceof WritableState; } @@ -214,7 +219,7 @@ function Writable(options) { // Checking for a Stream.Duplex instance is faster here instead of inside // the WritableState constructor, at least with V8 6.5 - const isDuplex = (this instanceof Stream.Duplex); + const isDuplex = this instanceof Stream.Duplex; if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options); @@ -225,17 +230,13 @@ function Writable(options) { this.writable = true; if (options) { - if (typeof options.write === 'function') - this._write = options.write; + if (typeof options.write === "function") this._write = options.write; - if (typeof options.writev === 'function') - this._writev = options.writev; + if (typeof options.writev === "function") this._writev = options.writev; - if (typeof options.destroy === 'function') - this._destroy = options.destroy; + if (typeof options.destroy === "function") this._destroy = options.destroy; - if (typeof options.final === 'function') - this._final = options.final; + if (typeof options.final === "function") this._final = options.final; } Stream.call(this); @@ -246,7 +247,6 @@ Writable.prototype.pipe = function() { errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE()); }; - function writeAfterEnd(stream, cb) { const er = new ERR_STREAM_WRITE_AFTER_END(); // TODO: defer error events consistently everywhere, not just the cb @@ -262,8 +262,8 @@ function validChunk(stream, state, chunk, cb) { if (chunk === null) { er = new 
ERR_STREAM_NULL_VALUES(); - } else if (typeof chunk !== 'string' && !state.objectMode) { - er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk); + } else if (typeof chunk !== "string" && !state.objectMode) { + er = new ERR_INVALID_ARG_TYPE("chunk", ["string", "Buffer"], chunk); } if (er) { errorOrDestroy(stream, er); @@ -283,21 +283,17 @@ Writable.prototype.write = function(chunk, encoding, cb) { chunk = Stream._uint8ArrayToBuffer(chunk); } - if (typeof encoding === 'function') { + if (typeof encoding === "function") { cb = encoding; encoding = null; } - if (isBuf) - encoding = 'buffer'; - else if (!encoding) - encoding = state.defaultEncoding; + if (isBuf) encoding = "buffer"; + else if (!encoding) encoding = state.defaultEncoding; - if (typeof cb !== 'function') - cb = nop; + if (typeof cb !== "function") cb = nop; - if (state.ending) - writeAfterEnd(this, cb); + if (state.ending) writeAfterEnd(this, cb); else if (isBuf || validChunk(this, state, chunk, cb)) { state.pendingcb++; ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb); @@ -316,25 +312,25 @@ Writable.prototype.uncork = function() { if (state.corked) { state.corked--; - if (!state.writing && - !state.corked && - !state.bufferProcessing && - state.bufferedRequest) + if ( + !state.writing && + !state.corked && + !state.bufferProcessing && + state.bufferedRequest + ) clearBuffer(this, state); } }; Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) { // node::ParseEncoding() requires lower case. 
- if (typeof encoding === 'string') - encoding = encoding.toLowerCase(); - if (!Buffer.isEncoding(encoding)) - throw new ERR_UNKNOWN_ENCODING(encoding); + if (typeof encoding === "string") encoding = encoding.toLowerCase(); + if (!Buffer.isEncoding(encoding)) throw new ERR_UNKNOWN_ENCODING(encoding); this._writableState.defaultEncoding = encoding; return this; }; -Object.defineProperty(Writable.prototype, 'writableBuffer', { +Object.defineProperty(Writable.prototype, "writableBuffer", { // Making it explicit this property is not enumerable // because otherwise some prototype manipulation in // userland will fail @@ -345,15 +341,17 @@ Object.defineProperty(Writable.prototype, 'writableBuffer', { }); function decodeChunk(state, chunk, encoding) { - if (!state.objectMode && - state.decodeStrings !== false && - typeof chunk === 'string') { + if ( + !state.objectMode && + state.decodeStrings !== false && + typeof chunk === "string" + ) { chunk = Buffer.from(chunk, encoding); } return chunk; } -Object.defineProperty(Writable.prototype, 'writableEnded', { +Object.defineProperty(Writable.prototype, "writableEnded", { // Making it explicit this property is not enumerable // because otherwise some prototype manipulation in // userland will fail @@ -363,7 +361,7 @@ Object.defineProperty(Writable.prototype, 'writableEnded', { } }); -Object.defineProperty(Writable.prototype, 'writableHighWaterMark', { +Object.defineProperty(Writable.prototype, "writableHighWaterMark", { // Making it explicit this property is not enumerable // because otherwise some prototype manipulation in // userland will fail @@ -381,7 +379,7 @@ function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) { var newChunk = decodeChunk(state, chunk, encoding); if (chunk !== newChunk) { isBuf = true; - encoding = 'buffer'; + encoding = "buffer"; chunk = newChunk; } } @@ -391,8 +389,7 @@ function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) { const ret = state.length < state.highWaterMark; // We 
must ensure that previous needDrain will not be reset to false. - if (!ret) - state.needDrain = true; + if (!ret) state.needDrain = true; if (state.writing || state.corked) { var last = state.lastBufferedRequest; @@ -421,12 +418,9 @@ function doWrite(stream, state, writev, len, chunk, encoding, cb) { state.writecb = cb; state.writing = true; state.sync = true; - if (state.destroyed) - state.onwrite(new ERR_STREAM_DESTROYED('write')); - else if (writev) - stream._writev(chunk, state.onwrite); - else - stream._write(chunk, encoding, state.onwrite); + if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED("write")); + else if (writev) stream._writev(chunk, state.onwrite); + else stream._write(chunk, encoding, state.onwrite); state.sync = false; } @@ -450,24 +444,24 @@ function onwrite(stream, er) { const sync = state.sync; const cb = state.writecb; - if (typeof cb !== 'function') - throw new ERR_MULTIPLE_CALLBACK(); + if (typeof cb !== "function") throw new ERR_MULTIPLE_CALLBACK(); state.writing = false; state.writecb = null; state.length -= state.writelen; state.writelen = 0; - if (er) - onwriteError(stream, state, sync, er, cb); + if (er) onwriteError(stream, state, sync, er, cb); else { // Check if we're actually ready to finish, but don't emit yet var finished = needFinish(state) || stream.destroyed; - if (!finished && - !state.corked && - !state.bufferProcessing && - state.bufferedRequest) { + if ( + !finished && + !state.corked && + !state.bufferProcessing && + state.bufferedRequest + ) { clearBuffer(stream, state); } @@ -480,11 +474,11 @@ function onwrite(stream, er) { } function afterWrite(stream, state, cb) { - const needDrain = !state.ending && !stream.destroyed && state.length === 0 && - state.needDrain; + const needDrain = + !state.ending && !stream.destroyed && state.length === 0 && state.needDrain; if (needDrain) { state.needDrain = false; - stream.emit('drain'); + stream.emit("drain"); } state.pendingcb--; cb(); @@ -507,14 +501,13 @@ function 
clearBuffer(stream, state) { var allBuffers = true; while (entry) { buffer[count] = entry; - if (!entry.isBuf) - allBuffers = false; + if (!entry.isBuf) allBuffers = false; entry = entry.next; count += 1; } buffer.allBuffers = allBuffers; - doWrite(stream, state, true, state.length, buffer, '', holder.finish); + doWrite(stream, state, true, state.length, buffer, "", holder.finish); // doWrite is almost always async, defer these to save a bit of time // as the hot path ends with doWrite @@ -549,8 +542,7 @@ function clearBuffer(stream, state) { } } - if (entry === null) - state.lastBufferedRequest = null; + if (entry === null) state.lastBufferedRequest = null; } state.bufferedRequest = entry; @@ -558,7 +550,7 @@ function clearBuffer(stream, state) { } Writable.prototype._write = function(chunk, encoding, cb) { - cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()')); + cb(new ERR_METHOD_NOT_IMPLEMENTED("_write()")); }; Writable.prototype._writev = null; @@ -566,17 +558,16 @@ Writable.prototype._writev = null; Writable.prototype.end = function(chunk, encoding, cb) { const state = this._writableState; - if (typeof chunk === 'function') { + if (typeof chunk === "function") { cb = chunk; chunk = null; encoding = null; - } else if (typeof encoding === 'function') { + } else if (typeof encoding === "function") { cb = encoding; encoding = null; } - if (chunk !== null && chunk !== undefined) - this.write(chunk, encoding); + if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); // .end() fully uncorks if (state.corked) { @@ -585,20 +576,19 @@ Writable.prototype.end = function(chunk, encoding, cb) { } // Ignore unnecessary end() calls. 
- if (!state.ending) - endWritable(this, state, cb); - else if (typeof cb === 'function') { + if (!state.ending) endWritable(this, state, cb); + else if (typeof cb === "function") { if (!state.finished) { - this.once('finish', cb); + this.once("finish", cb); } else { - cb(new ERR_STREAM_ALREADY_FINISHED('end')); + cb(new ERR_STREAM_ALREADY_FINISHED("end")); } } return this; }; -Object.defineProperty(Writable.prototype, 'writableLength', { +Object.defineProperty(Writable.prototype, "writableLength", { // Making it explicit this property is not enumerable // because otherwise some prototype manipulation in // userland will fail @@ -609,34 +599,36 @@ Object.defineProperty(Writable.prototype, 'writableLength', { }); function needFinish(state) { - return (state.ending && - state.length === 0 && - !state.errorEmitted && - state.bufferedRequest === null && - !state.finished && - !state.writing); + return ( + state.ending && + state.length === 0 && + !state.errorEmitted && + state.bufferedRequest === null && + !state.finished && + !state.writing + ); } function callFinal(stream, state) { - stream._final((err) => { + stream._final(err => { state.pendingcb--; if (err) { errorOrDestroy(stream, err); } else { state.prefinished = true; - stream.emit('prefinish'); + stream.emit("prefinish"); finishMaybe(stream, state); } }); } function prefinish(stream, state) { if (!state.prefinished && !state.finalCalled) { - if (typeof stream._final === 'function' && !state.destroyed) { + if (typeof stream._final === "function" && !state.destroyed) { state.pendingcb++; state.finalCalled = true; process.nextTick(callFinal, stream, state); } else { state.prefinished = true; - stream.emit('prefinish'); + stream.emit("prefinish"); } } } @@ -647,7 +639,7 @@ function finishMaybe(stream, state) { prefinish(stream, state); if (state.pendingcb === 0) { state.finished = true; - stream.emit('finish'); + stream.emit("finish"); if (state.autoDestroy) { // In case of duplex streams we need a way to detect 
@@ -666,10 +658,8 @@ function endWritable(stream, state, cb) { state.ending = true; finishMaybe(stream, state); if (cb) { - if (state.finished) - process.nextTick(cb); - else - stream.once('finish', cb); + if (state.finished) process.nextTick(cb); + else stream.once("finish", cb); } state.ended = true; stream.writable = false; @@ -689,7 +679,7 @@ function onCorkedFinish(corkReq, state, err) { state.corkedRequestsFree.next = corkReq; } -Object.defineProperty(Writable.prototype, 'destroyed', { +Object.defineProperty(Writable.prototype, "destroyed", { // Making it explicit this property is not enumerable // because otherwise some prototype manipulation in // userland will fail @@ -713,14 +703,14 @@ Object.defineProperty(Writable.prototype, 'destroyed', { } }); -Object.defineProperty(Writable.prototype, 'writableObjectMode', { +Object.defineProperty(Writable.prototype, "writableObjectMode", { enumerable: false, get() { return this._writableState ? this._writableState.objectMode : false; } }); -Object.defineProperty(Writable.prototype, 'writableFinished', { +Object.defineProperty(Writable.prototype, "writableFinished", { // Making it explicit this property is not enumerable // because otherwise some prototype manipulation in // userland will fail diff --git a/lib/internal/bootstrap/node.js b/lib/internal/bootstrap/node.js index 18acd9d2b64774..54c35a0e4ff41e 100644 --- a/lib/internal/bootstrap/node.js +++ b/lib/internal/bootstrap/node.js @@ -27,7 +27,7 @@ // run additional setups exported by `lib/internal/bootstrap/pre_execution.js`, // depending on the execution mode. 
-'use strict'; +"use strict"; // This file is compiled as if it's wrapped in a function with arguments // passed by node::RunBootstrapping() @@ -36,8 +36,8 @@ setupPrepareStackTrace(); const { JSON, Object, Symbol } = primordials; -const config = internalBinding('config'); -const { deprecate } = require('internal/util'); +const config = internalBinding("config"); +const { deprecate } = require("internal/util"); setupProcessObject(); @@ -48,25 +48,21 @@ process.domain = null; process._exiting = false; // Bootstrappers for all threads, including worker threads and main thread -const perThreadSetup = require('internal/process/per_thread'); +const perThreadSetup = require("internal/process/per_thread"); // Bootstrappers for the main thread only let mainThreadSetup; // Bootstrappers for the worker threads only let workerThreadSetup; if (ownsProcessState) { - mainThreadSetup = require( - 'internal/process/main_thread_only' - ); + mainThreadSetup = require("internal/process/main_thread_only"); } else { - workerThreadSetup = require( - 'internal/process/worker_thread_only' - ); + workerThreadSetup = require("internal/process/worker_thread_only"); } // process.config is serialized config.gypi -process.config = JSON.parse(internalBinding('native_module').config); +process.config = JSON.parse(internalBinding("native_module").config); -const rawMethods = internalBinding('process_methods'); +const rawMethods = internalBinding("process_methods"); // Set up methods and events on the process object for the main thread if (isMainThread) { process.abort = rawMethods.abort; @@ -78,14 +74,13 @@ if (isMainThread) { // TODO(joyeecheung): deprecate and remove these underscore methods process._debugProcess = rawMethods._debugProcess; process._debugEnd = rawMethods._debugEnd; - process._startProfilerIdleNotifier = - rawMethods._startProfilerIdleNotifier; + process._startProfilerIdleNotifier = rawMethods._startProfilerIdleNotifier; process._stopProfilerIdleNotifier = 
rawMethods._stopProfilerIdleNotifier; } else { const wrapped = workerThreadSetup.wrapProcessMethods(rawMethods); - process.abort = workerThreadSetup.unavailable('process.abort()'); - process.chdir = workerThreadSetup.unavailable('process.chdir()'); + process.abort = workerThreadSetup.unavailable("process.abort()"); + process.chdir = workerThreadSetup.unavailable("process.chdir()"); process.umask = wrapped.umask; process.cwd = rawMethods.cwd; } @@ -114,7 +109,7 @@ if (isMainThread) { process.exit = wrapped.exit; } -const credentials = internalBinding('credentials'); +const credentials = internalBinding("credentials"); if (credentials.implementsPosixCredentials) { process.getuid = credentials.getuid; process.geteuid = credentials.geteuid; @@ -131,23 +126,28 @@ if (credentials.implementsPosixCredentials) { process.setgid = wrapped.setgid; process.setuid = wrapped.setuid; } else { - process.initgroups = - workerThreadSetup.unavailable('process.initgroups()'); - process.setgroups = workerThreadSetup.unavailable('process.setgroups()'); - process.setegid = workerThreadSetup.unavailable('process.setegid()'); - process.seteuid = workerThreadSetup.unavailable('process.seteuid()'); - process.setgid = workerThreadSetup.unavailable('process.setgid()'); - process.setuid = workerThreadSetup.unavailable('process.setuid()'); + process.initgroups = workerThreadSetup.unavailable("process.initgroups()"); + process.setgroups = workerThreadSetup.unavailable("process.setgroups()"); + process.setegid = workerThreadSetup.unavailable("process.setegid()"); + process.seteuid = workerThreadSetup.unavailable("process.seteuid()"); + process.setgid = workerThreadSetup.unavailable("process.setgid()"); + process.setuid = workerThreadSetup.unavailable("process.setuid()"); } } if (isMainThread) { - const { getStdout, getStdin, getStderr } = - require('internal/process/stdio').getMainThreadStdio(); + const { + getStdout, + getStdin, + getStderr + } = 
require("internal/process/stdio").getMainThreadStdio(); setupProcessStdio(getStdout, getStdin, getStderr); } else { - const { getStdout, getStdin, getStderr } = - workerThreadSetup.createStdioGetters(); + const { + getStdout, + getStdin, + getStderr + } = workerThreadSetup.createStdioGetters(); setupProcessStdio(getStdout, getStdin, getStderr); } @@ -155,57 +155,55 @@ if (isMainThread) { // process. They use the same functions as the JS embedder API. These callbacks // are setup immediately to prevent async_wrap.setupHooks() from being hijacked // and the cost of doing so is negligible. -const { nativeHooks } = require('internal/async_hooks'); -internalBinding('async_wrap').setupHooks(nativeHooks); +const { nativeHooks } = require("internal/async_hooks"); +internalBinding("async_wrap").setupHooks(nativeHooks); const { setupTaskQueue, queueMicrotask -} = require('internal/process/task_queues'); +} = require("internal/process/task_queues"); if (!config.noBrowserGlobals) { // Override global console from the one provided by the VM // to the one implemented by Node.js // https://console.spec.whatwg.org/#console-namespace - exposeNamespace(global, 'console', createGlobalConsole(global.console)); + exposeNamespace(global, "console", createGlobalConsole(global.console)); - const { URL, URLSearchParams } = require('internal/url'); + const { URL, URLSearchParams } = require("internal/url"); // https://url.spec.whatwg.org/#url - exposeInterface(global, 'URL', URL); + exposeInterface(global, "URL", URL); // https://url.spec.whatwg.org/#urlsearchparams - exposeInterface(global, 'URLSearchParams', URLSearchParams); + exposeInterface(global, "URLSearchParams", URLSearchParams); - const { - TextEncoder, TextDecoder - } = require('internal/encoding'); + const { TextEncoder, TextDecoder } = require("internal/encoding"); // https://encoding.spec.whatwg.org/#textencoder - exposeInterface(global, 'TextEncoder', TextEncoder); + exposeInterface(global, "TextEncoder", TextEncoder); // 
https://encoding.spec.whatwg.org/#textdecoder - exposeInterface(global, 'TextDecoder', TextDecoder); + exposeInterface(global, "TextDecoder", TextDecoder); // https://html.spec.whatwg.org/multipage/webappapis.html#windoworworkerglobalscope - const timers = require('timers'); - defineOperation(global, 'clearInterval', timers.clearInterval); - defineOperation(global, 'clearTimeout', timers.clearTimeout); - defineOperation(global, 'setInterval', timers.setInterval); - defineOperation(global, 'setTimeout', timers.setTimeout); + const timers = require("timers"); + defineOperation(global, "clearInterval", timers.clearInterval); + defineOperation(global, "clearTimeout", timers.clearTimeout); + defineOperation(global, "setInterval", timers.setInterval); + defineOperation(global, "setTimeout", timers.setTimeout); - defineOperation(global, 'queueMicrotask', queueMicrotask); + defineOperation(global, "queueMicrotask", queueMicrotask); // Non-standard extensions: - defineOperation(global, 'clearImmediate', timers.clearImmediate); - defineOperation(global, 'setImmediate', timers.setImmediate); + defineOperation(global, "clearImmediate", timers.clearImmediate); + defineOperation(global, "setImmediate", timers.setImmediate); } // Set the per-Environment callback that will be called // when the TrackingTraceStateObserver updates trace state. // Note that when NODE_USE_V8_PLATFORM is true, the observer is // attached to the per-process TracingController. 
-const { setTraceCategoryStateUpdateHandler } = internalBinding('trace_events'); +const { setTraceCategoryStateUpdateHandler } = internalBinding("trace_events"); setTraceCategoryStateUpdateHandler(perThreadSetup.toggleTraceCategoryState); // process.allowedNodeEnvironmentFlags -Object.defineProperty(process, 'allowedNodeEnvironmentFlags', { +Object.defineProperty(process, "allowedNodeEnvironmentFlags", { get() { const flags = perThreadSetup.buildAllowedFlags(); process.allowedNodeEnvironmentFlags = flags; @@ -214,7 +212,7 @@ Object.defineProperty(process, 'allowedNodeEnvironmentFlags', { // If the user tries to set this to another value, override // this completely to that value. set(value) { - Object.defineProperty(this, 'allowedNodeEnvironmentFlags', { + Object.defineProperty(this, "allowedNodeEnvironmentFlags", { value, configurable: true, enumerable: true, @@ -228,13 +226,14 @@ Object.defineProperty(process, 'allowedNodeEnvironmentFlags', { // process.assert process.assert = deprecate( perThreadSetup.assert, - 'process.assert() is deprecated. Please use the `assert` module instead.', - 'DEP0100'); + "process.assert() is deprecated. Please use the `assert` module instead.", + "DEP0100" +); // TODO(joyeecheung): this property has not been well-maintained, should we // deprecate it in favor of a better API? 
const { isDebugBuild, hasOpenSSL, hasInspector } = config; -Object.defineProperty(process, 'features', { +Object.defineProperty(process, "features", { enumerable: true, writable: false, configurable: false, @@ -242,12 +241,12 @@ Object.defineProperty(process, 'features', { inspector: hasInspector, debug: isDebugBuild, uv: true, - ipv6: true, // TODO(bnoordhuis) ping libuv + ipv6: true, // TODO(bnoordhuis) ping libuv tls_alpn: hasOpenSSL, tls_sni: hasOpenSSL, tls_ocsp: hasOpenSSL, tls: hasOpenSSL, - cached_builtins: config.hasCachedBuiltins, + cached_builtins: config.hasCachedBuiltins } }); @@ -256,7 +255,7 @@ Object.defineProperty(process, 'features', { onGlobalUncaughtException, setUncaughtExceptionCaptureCallback, hasUncaughtExceptionCaptureCallback - } = require('internal/process/execution'); + } = require("internal/process/execution"); // For legacy reasons this is still called `_fatalException`, even // though it is now a global uncaught exception handler. @@ -265,13 +264,11 @@ Object.defineProperty(process, 'features', { // TODO(joyeecheung): investigate whether process._fatalException // can be deprecated. 
process._fatalException = onGlobalUncaughtException; - process.setUncaughtExceptionCaptureCallback = - setUncaughtExceptionCaptureCallback; - process.hasUncaughtExceptionCaptureCallback = - hasUncaughtExceptionCaptureCallback; + process.setUncaughtExceptionCaptureCallback = setUncaughtExceptionCaptureCallback; + process.hasUncaughtExceptionCaptureCallback = hasUncaughtExceptionCaptureCallback; } -const { emitWarning } = require('internal/process/warning'); +const { emitWarning } = require("internal/process/warning"); process.emitWarning = emitWarning; // We initialize the tick callbacks and the timer callbacks last during @@ -286,8 +283,8 @@ process.emitWarning = emitWarning; // TODO(joyeecheung): either remove it or make it public process._tickCallback = runNextTicks; - const { getTimerCallbacks } = require('internal/timers'); - const { setupTimers } = internalBinding('timers'); + const { getTimerCallbacks } = require("internal/timers"); + const { setupTimers } = internalBinding("timers"); const { processImmediate, processTimers } = getTimerCallbacks(runNextTicks); // Sets two per-Environment callbacks that will be run from libuv: // - processImmediate will be run in the callback of the per-Environment @@ -301,14 +298,11 @@ function setupPrepareStackTrace() { const { setEnhanceStackForFatalException, setPrepareStackTraceCallback - } = internalBinding('errors'); + } = internalBinding("errors"); const { prepareStackTrace, - fatalExceptionStackEnhancers: { - beforeInspector, - afterInspector - } - } = require('internal/errors'); + fatalExceptionStackEnhancers: { beforeInspector, afterInspector } + } = require("internal/errors"); // Tell our PrepareStackTraceCallback passed to the V8 API // to call prepareStackTrace(). 
setPrepareStackTraceCallback(prepareStackTrace); @@ -317,7 +311,7 @@ function setupPrepareStackTrace() { } function setupProcessObject() { - const EventEmitter = require('events'); + const EventEmitter = require("events"); const origProcProto = Object.getPrototypeOf(process); Object.setPrototypeOf(origProcProto, EventEmitter.prototype); EventEmitter.call(process); @@ -325,10 +319,10 @@ function setupProcessObject() { enumerable: false, writable: true, configurable: false, - value: 'process' + value: "process" }); // Make process globally available to users by putting it on the global proxy - Object.defineProperty(global, 'process', { + Object.defineProperty(global, "process", { value: process, enumerable: false, writable: true, @@ -337,19 +331,19 @@ function setupProcessObject() { } function setupProcessStdio(getStdout, getStdin, getStderr) { - Object.defineProperty(process, 'stdout', { + Object.defineProperty(process, "stdout", { configurable: true, enumerable: true, get: getStdout }); - Object.defineProperty(process, 'stderr', { + Object.defineProperty(process, "stderr", { configurable: true, enumerable: true, get: getStderr }); - Object.defineProperty(process, 'stdin', { + Object.defineProperty(process, "stdin", { configurable: true, enumerable: true, get: getStdin @@ -363,53 +357,61 @@ function setupProcessStdio(getStdout, getStdin, getStderr) { function setupGlobalProxy() { Object.defineProperty(global, Symbol.toStringTag, { - value: 'global', + value: "global", writable: false, enumerable: false, configurable: true }); function makeGetter(name) { - return deprecate(function() { - return this; - }, `'${name}' is deprecated, use 'global'`, 'DEP0016'); + return deprecate( + function() { + return this; + }, + `'${name}' is deprecated, use 'global'`, + "DEP0016" + ); } function makeSetter(name) { - return deprecate(function(value) { - Object.defineProperty(this, name, { - configurable: true, - writable: true, - enumerable: true, - value: value - }); - }, 
`'${name}' is deprecated, use 'global'`, 'DEP0016'); + return deprecate( + function(value) { + Object.defineProperty(this, name, { + configurable: true, + writable: true, + enumerable: true, + value: value + }); + }, + `'${name}' is deprecated, use 'global'`, + "DEP0016" + ); } Object.defineProperties(global, { GLOBAL: { configurable: true, - get: makeGetter('GLOBAL'), - set: makeSetter('GLOBAL') + get: makeGetter("GLOBAL"), + set: makeSetter("GLOBAL") }, root: { configurable: true, - get: makeGetter('root'), - set: makeSetter('root') + get: makeGetter("root"), + set: makeSetter("root") } }); } function setupBuffer() { - const { Buffer } = require('buffer'); - const bufferBinding = internalBinding('buffer'); + const { Buffer } = require("buffer"); + const bufferBinding = internalBinding("buffer"); // Only after this point can C++ use Buffer::New() bufferBinding.setBufferPrototype(Buffer.prototype); delete bufferBinding.setBufferPrototype; delete bufferBinding.zeroFill; - Object.defineProperty(global, 'Buffer', { + Object.defineProperty(global, "Buffer", { value: Buffer, enumerable: false, writable: true, @@ -418,16 +420,15 @@ function setupBuffer() { } function createGlobalConsole(consoleFromVM) { - const consoleFromNode = - require('internal/console/global'); + const consoleFromNode = require("internal/console/global"); if (config.hasInspector) { - const inspector = require('internal/util/inspector'); + const inspector = require("internal/util/inspector"); // This will be exposed by `require('inspector').console` later. inspector.consoleFromVM = consoleFromVM; // TODO(joyeecheung): postpone this until the first time inspector // is activated. inspector.wrapConsole(consoleFromNode, consoleFromVM); - const { setConsoleExtensionInstaller } = internalBinding('inspector'); + const { setConsoleExtensionInstaller } = internalBinding("inspector"); // Setup inspector command line API. 
setConsoleExtensionInstaller(inspector.installConsoleExtensions); } diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index 51adba608698a4..ed5b4a14e93ab7 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js @@ -19,7 +19,7 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. -'use strict'; +"use strict"; const { JSON, @@ -27,45 +27,42 @@ const { ObjectPrototype, Reflect, SafeMap, - StringPrototype, + StringPrototype } = primordials; -const { NativeModule } = require('internal/bootstrap/loaders'); -const { pathToFileURL, fileURLToPath, URL } = require('internal/url'); -const { deprecate } = require('internal/util'); -const vm = require('vm'); -const assert = require('internal/assert'); -const fs = require('fs'); -const internalFS = require('internal/fs/utils'); -const path = require('path'); -const { - internalModuleReadJSON, - internalModuleStat -} = internalBinding('fs'); -const { safeGetenv } = internalBinding('credentials'); +const { NativeModule } = require("internal/bootstrap/loaders"); +const { pathToFileURL, fileURLToPath, URL } = require("internal/url"); +const { deprecate } = require("internal/util"); +const vm = require("vm"); +const assert = require("internal/assert"); +const fs = require("fs"); +const internalFS = require("internal/fs/utils"); +const path = require("path"); +const { internalModuleReadJSON, internalModuleStat } = internalBinding("fs"); +const { safeGetenv } = internalBinding("credentials"); const { makeRequireFunction, normalizeReferrerURL, stripBOM, loadNativeModule -} = require('internal/modules/cjs/helpers'); -const { getOptionValue } = require('internal/options'); -const preserveSymlinks = getOptionValue('--preserve-symlinks'); -const preserveSymlinksMain = getOptionValue('--preserve-symlinks-main'); -const experimentalModules = getOptionValue('--experimental-modules'); -const manifest = 
getOptionValue('--experimental-policy') ? - require('internal/process/policy').manifest : - null; -const { compileFunction } = internalBinding('contextify'); +} = require("internal/modules/cjs/helpers"); +const { getOptionValue } = require("internal/options"); +const preserveSymlinks = getOptionValue("--preserve-symlinks"); +const preserveSymlinksMain = getOptionValue("--preserve-symlinks-main"); +const experimentalModules = getOptionValue("--experimental-modules"); +const manifest = getOptionValue("--experimental-policy") + ? require("internal/process/policy").manifest + : null; +const { compileFunction } = internalBinding("contextify"); const { ERR_INVALID_ARG_VALUE, ERR_INVALID_OPT_VALUE, ERR_REQUIRE_ESM -} = require('internal/errors').codes; -const { validateString } = require('internal/validators'); -const pendingDeprecation = getOptionValue('--pending-deprecation'); -const experimentalExports = getOptionValue('--experimental-exports'); +} = require("internal/errors").codes; +const { validateString } = require("internal/validators"); +const pendingDeprecation = getOptionValue("--pending-deprecation"); +const experimentalExports = getOptionValue("--experimental-exports"); module.exports = { wrapSafe, Module }; @@ -77,9 +74,9 @@ const { CHAR_FORWARD_SLASH, CHAR_BACKWARD_SLASH, CHAR_COLON -} = require('internal/constants'); +} = require("internal/constants"); -const isWindows = process.platform === 'win32'; +const isWindows = process.platform === "win32"; const relativeResolveCache = Object.create(null); @@ -87,7 +84,7 @@ let requireDepth = 0; let statCache = null; function enrichCJSError(err) { - const stack = err.stack.split('\n'); + const stack = err.stack.split("\n"); const lineWithErr = stack[1]; @@ -96,15 +93,18 @@ function enrichCJSError(err) { usage. However, some cases are not matching, cases like import statement after a comment block and/or after a variable definition. 
*/ - if (err.message.startsWith('Unexpected token \'export\'') || - (/^\s*import(?=[ {'"*])\s*(?![ (])/).test(lineWithErr)) { + if ( + err.message.startsWith("Unexpected token 'export'") || + /^\s*import(?=[ {'"*])\s*(?![ (])/.test(lineWithErr) + ) { process.emitWarning( 'To load an ES module, set "type": "module" in the package.json or use ' + - 'the .mjs extension.', + "the .mjs extension.", undefined, undefined, undefined, - true); + true + ); } } @@ -121,11 +121,10 @@ function stat(filename) { function updateChildren(parent, child, scan) { const children = parent && parent.children; - if (children && !(scan && children.includes(child))) - children.push(child); + if (children && !(scan && children.includes(child))) children.push(child); } -function Module(id = '', parent) { +function Module(id = "", parent) { this.id = id; this.path = path.dirname(id); this.exports = {}; @@ -160,8 +159,8 @@ let wrap = function(script) { }; const wrapper = [ - '(function (exports, require, module, __filename, __dirname) { ', - '\n});' + "(function (exports, require, module, __filename, __dirname) { ", + "\n});" ]; let wrapperProxy = new Proxy(wrapper, { @@ -176,7 +175,7 @@ let wrapperProxy = new Proxy(wrapper, { } }); -Object.defineProperty(Module, 'wrap', { +Object.defineProperty(Module, "wrap", { get() { return wrap; }, @@ -187,7 +186,7 @@ Object.defineProperty(Module, 'wrap', { } }); -Object.defineProperty(Module, 'wrapper', { +Object.defineProperty(Module, "wrapper", { get() { return wrapperProxy; }, @@ -198,8 +197,8 @@ Object.defineProperty(Module, 'wrapper', { } }); -const debug = require('internal/util/debuglog').debuglog('module'); -Module._debug = deprecate(debug, 'Module._debug is deprecated.', 'DEP0077'); +const debug = require("internal/util/debuglog").debuglog("module"); +Module._debug = deprecate(debug, "Module._debug is deprecated.", "DEP0077"); // Given a module name, and a list of paths to test, returns the first // matching file in the following precedence. 
@@ -215,7 +214,7 @@ Module._debug = deprecate(debug, 'Module._debug is deprecated.', 'DEP0077'); const packageJsonCache = new SafeMap(); function readPackage(requestPath) { - const jsonPath = path.resolve(requestPath, 'package.json'); + const jsonPath = path.resolve(requestPath, "package.json"); const existing = packageJsonCache.get(jsonPath); if (existing !== undefined) return existing; @@ -242,7 +241,7 @@ function readPackage(requestPath) { return filtered; } catch (e) { e.path = jsonPath; - e.message = 'Error parsing ' + jsonPath + ': ' + e.message; + e.message = "Error parsing " + jsonPath + ": " + e.message; throw e; } } @@ -254,8 +253,7 @@ function readPackageScope(checkPath) { (separatorIndex = checkPath.lastIndexOf(path.sep)) > rootSeparatorIndex ) { checkPath = checkPath.slice(0, separatorIndex); - if (checkPath.endsWith(path.sep + 'node_modules')) - return false; + if (checkPath.endsWith(path.sep + "node_modules")) return false; const pjson = readPackage(checkPath); if (pjson) return pjson; } @@ -276,33 +274,34 @@ function tryPackage(requestPath, exts, isMain, originalPath) { const pkg = readPackageMain(requestPath); if (!pkg) { - return tryExtensions(path.resolve(requestPath, 'index'), exts, isMain); + return tryExtensions(path.resolve(requestPath, "index"), exts, isMain); } const filename = path.resolve(requestPath, pkg); - let actual = tryFile(filename, isMain) || + let actual = + tryFile(filename, isMain) || tryExtensions(filename, exts, isMain) || - tryExtensions(path.resolve(filename, 'index'), exts, isMain); + tryExtensions(path.resolve(filename, "index"), exts, isMain); if (actual === false) { - actual = tryExtensions(path.resolve(requestPath, 'index'), exts, isMain); + actual = tryExtensions(path.resolve(requestPath, "index"), exts, isMain); if (!actual) { // eslint-disable-next-line no-restricted-syntax const err = new Error( `Cannot find module '${filename}'. 
` + - 'Please verify that the package.json has a valid "main" entry' + 'Please verify that the package.json has a valid "main" entry' ); - err.code = 'MODULE_NOT_FOUND'; - err.path = path.resolve(requestPath, 'package.json'); + err.code = "MODULE_NOT_FOUND"; + err.path = path.resolve(requestPath, "package.json"); err.requestPath = originalPath; // TODO(BridgeAR): Add the requireStack as well. throw err; } else if (pendingDeprecation) { - const jsonPath = path.resolve(requestPath, 'package.json'); + const jsonPath = path.resolve(requestPath, "package.json"); process.emitWarning( `Invalid 'main' field in '${jsonPath}' of '${pkg}'. ` + - 'Please either fix that or report it to the module author', - 'DeprecationWarning', - 'DEP0128' + "Please either fix that or report it to the module author", + "DeprecationWarning", + "DEP0128" ); } } @@ -351,13 +350,13 @@ function findLongestRegisteredExtension(filename) { let currentExtension; let index; let startIndex = 0; - while ((index = name.indexOf('.', startIndex)) !== -1) { + while ((index = name.indexOf(".", startIndex)) !== -1) { startIndex = index + 1; if (index === 0) continue; // Skip dotfiles like .gitignore currentExtension = name.slice(index); if (Module._extensions[currentExtension]) return currentExtension; } - return '.js'; + return ".js"; } // This only applies to requests of a specific form: @@ -367,7 +366,7 @@ const EXPORTS_PATTERN = /^((?:@[^/\\%]+\/)?[^./\\%][^/\\%]*)(\/.*)?$/; function resolveExports(nmPath, request, absoluteRequest) { // The implementation's behavior is meant to mirror resolution in ESM. 
if (experimentalExports && !absoluteRequest) { - const [, name, expansion = ''] = + const [, name, expansion = ""] = StringPrototype.match(request, EXPORTS_PATTERN) || []; if (!name) { return path.resolve(nmPath, request); @@ -377,38 +376,57 @@ function resolveExports(nmPath, request, absoluteRequest) { const pkgExports = readPackageExports(basePath); const mappingKey = `.${expansion}`; - if (typeof pkgExports === 'object' && pkgExports !== null) { + if (typeof pkgExports === "object" && pkgExports !== null) { if (ObjectPrototype.hasOwnProperty(pkgExports, mappingKey)) { const mapping = pkgExports[mappingKey]; - return resolveExportsTarget(pathToFileURL(basePath + '/'), mapping, '', - basePath, mappingKey); + return resolveExportsTarget( + pathToFileURL(basePath + "/"), + mapping, + "", + basePath, + mappingKey + ); } - let dirMatch = ''; + let dirMatch = ""; for (const candidateKey of Object.keys(pkgExports)) { - if (candidateKey[candidateKey.length - 1] !== '/') continue; - if (candidateKey.length > dirMatch.length && - StringPrototype.startsWith(mappingKey, candidateKey)) { + if (candidateKey[candidateKey.length - 1] !== "/") continue; + if ( + candidateKey.length > dirMatch.length && + StringPrototype.startsWith(mappingKey, candidateKey) + ) { dirMatch = candidateKey; } } - if (dirMatch !== '') { + if (dirMatch !== "") { const mapping = pkgExports[dirMatch]; const subpath = StringPrototype.slice(mappingKey, dirMatch.length); - return resolveExportsTarget(pathToFileURL(basePath + '/'), mapping, - subpath, basePath, mappingKey); + return resolveExportsTarget( + pathToFileURL(basePath + "/"), + mapping, + subpath, + basePath, + mappingKey + ); } } - if (mappingKey === '.' && typeof pkgExports === 'string') { - return resolveExportsTarget(pathToFileURL(basePath + '/'), pkgExports, - '', basePath, mappingKey); + if (mappingKey === "." 
&& typeof pkgExports === "string") { + return resolveExportsTarget( + pathToFileURL(basePath + "/"), + pkgExports, + "", + basePath, + mappingKey + ); } if (pkgExports != null) { // eslint-disable-next-line no-restricted-syntax - const e = new Error(`Package exports for '${basePath}' do not define ` + - `a '${mappingKey}' subpath`); - e.code = 'MODULE_NOT_FOUND'; + const e = new Error( + `Package exports for '${basePath}' do not define ` + + `a '${mappingKey}' subpath` + ); + e.code = "MODULE_NOT_FOUND"; throw e; } } @@ -417,58 +435,77 @@ function resolveExports(nmPath, request, absoluteRequest) { } function resolveExportsTarget(pkgPath, target, subpath, basePath, mappingKey) { - if (typeof target === 'string') { - if (target.startsWith('./') && - (subpath.length === 0 || target.endsWith('/'))) { + if (typeof target === "string") { + if ( + target.startsWith("./") && + (subpath.length === 0 || target.endsWith("/")) + ) { const resolvedTarget = new URL(target, pkgPath); const pkgPathPath = pkgPath.pathname; const resolvedTargetPath = resolvedTarget.pathname; - if (StringPrototype.startsWith(resolvedTargetPath, pkgPathPath) && - StringPrototype.indexOf(resolvedTargetPath, '/node_modules/', - pkgPathPath.length - 1) === -1) { + if ( + StringPrototype.startsWith(resolvedTargetPath, pkgPathPath) && + StringPrototype.indexOf( + resolvedTargetPath, + "/node_modules/", + pkgPathPath.length - 1 + ) === -1 + ) { const resolved = new URL(subpath, resolvedTarget); const resolvedPath = resolved.pathname; - if (StringPrototype.startsWith(resolvedPath, resolvedTargetPath) && - StringPrototype.indexOf(resolvedPath, '/node_modules/', - pkgPathPath.length - 1) === -1) { + if ( + StringPrototype.startsWith(resolvedPath, resolvedTargetPath) && + StringPrototype.indexOf( + resolvedPath, + "/node_modules/", + pkgPathPath.length - 1 + ) === -1 + ) { return fileURLToPath(resolved); } } } } else if (Array.isArray(target)) { for (const targetValue of target) { - if (typeof targetValue !== 
'string') continue; + if (typeof targetValue !== "string") continue; try { - return resolveExportsTarget(pkgPath, targetValue, subpath, basePath, - mappingKey); + return resolveExportsTarget( + pkgPath, + targetValue, + subpath, + basePath, + mappingKey + ); } catch (e) { - if (e.code !== 'MODULE_NOT_FOUND') throw e; + if (e.code !== "MODULE_NOT_FOUND") throw e; } } } // eslint-disable-next-line no-restricted-syntax - const e = new Error(`Package exports for '${basePath}' do not define a ` + - `valid '${mappingKey}' target${subpath ? 'for ' + subpath : ''}`); - e.code = 'MODULE_NOT_FOUND'; + const e = new Error( + `Package exports for '${basePath}' do not define a ` + + `valid '${mappingKey}' target${subpath ? "for " + subpath : ""}` + ); + e.code = "MODULE_NOT_FOUND"; throw e; } Module._findPath = function(request, paths, isMain) { const absoluteRequest = path.isAbsolute(request); if (absoluteRequest) { - paths = ['']; + paths = [""]; } else if (!paths || paths.length === 0) { return false; } - const cacheKey = request + '\x00' + - (paths.length === 1 ? paths[0] : paths.join('\x00')); + const cacheKey = + request + "\x00" + (paths.length === 1 ? paths[0] : paths.join("\x00")); const entry = Module._pathCache[cacheKey]; - if (entry) - return entry; + if (entry) return entry; var exts; - var trailingSlash = request.length > 0 && + var trailingSlash = + request.length > 0 && request.charCodeAt(request.length - 1) === CHAR_FORWARD_SLASH; if (!trailingSlash) { trailingSlash = /(?:^|\/)\.?\.$/.test(request); @@ -484,7 +521,8 @@ Module._findPath = function(request, paths, isMain) { var rc = stat(basePath); if (!trailingSlash) { - if (rc === 0) { // File. + if (rc === 0) { + // File. 
if (!isMain) { if (preserveSymlinks) { filename = path.resolve(basePath); @@ -508,16 +546,15 @@ Module._findPath = function(request, paths, isMain) { if (!filename) { // Try it with each of the extensions - if (exts === undefined) - exts = Object.keys(Module._extensions); + if (exts === undefined) exts = Object.keys(Module._extensions); filename = tryExtensions(basePath, exts, isMain); } } - if (!filename && rc === 1) { // Directory. + if (!filename && rc === 1) { + // Directory. // try it with each of the extensions at "index" - if (exts === undefined) - exts = Object.keys(Module._extensions); + if (exts === undefined) exts = Object.keys(Module._extensions); filename = tryPackage(basePath, exts, isMain, request); } @@ -530,7 +567,7 @@ Module._findPath = function(request, paths, isMain) { }; // 'node_modules' character codes reversed -const nmChars = [ 115, 101, 108, 117, 100, 111, 109, 95, 101, 100, 111, 110 ]; +const nmChars = [115, 101, 108, 117, 100, 111, 109, 95, 101, 100, 111, 110]; const nmLen = nmChars.length; if (isWindows) { // 'from' is the __dirname of the module. @@ -544,9 +581,11 @@ if (isWindows) { // return root node_modules when path is 'D:\\'. // path.resolve will make sure from.length >=3 in Windows. - if (from.charCodeAt(from.length - 1) === CHAR_BACKWARD_SLASH && - from.charCodeAt(from.length - 2) === CHAR_COLON) - return [from + 'node_modules']; + if ( + from.charCodeAt(from.length - 1) === CHAR_BACKWARD_SLASH && + from.charCodeAt(from.length - 2) === CHAR_COLON + ) + return [from + "node_modules"]; const paths = []; var p = 0; @@ -558,11 +597,12 @@ if (isWindows) { // Use colon as an extra condition since we can get node_modules // path for drive root like 'C:\node_modules' and don't need to // parse drive name. 
- if (code === CHAR_BACKWARD_SLASH || - code === CHAR_FORWARD_SLASH || - code === CHAR_COLON) { - if (p !== nmLen) - paths.push(from.slice(0, last) + '\\node_modules'); + if ( + code === CHAR_BACKWARD_SLASH || + code === CHAR_FORWARD_SLASH || + code === CHAR_COLON + ) { + if (p !== nmLen) paths.push(from.slice(0, last) + "\\node_modules"); last = i; p = 0; } else if (p !== -1) { @@ -576,15 +616,15 @@ if (isWindows) { return paths; }; -} else { // posix +} else { + // posix // 'from' is the __dirname of the module. Module._nodeModulePaths = function(from) { // Guarantee that 'from' is absolute. from = path.resolve(from); // Return early not only to avoid unnecessary work, but to *avoid* returning // an array of two items for a root: [ '//node_modules', '/node_modules' ] - if (from === '/') - return ['/node_modules']; + if (from === "/") return ["/node_modules"]; // note: this approach *only* works when the path is guaranteed // to be absolute. Doing a fully-edge-case-correct path.split @@ -595,8 +635,7 @@ if (isWindows) { for (var i = from.length - 1; i >= 0; --i) { const code = from.charCodeAt(i); if (code === CHAR_FORWARD_SLASH) { - if (p !== nmLen) - paths.push(from.slice(0, last) + '/node_modules'); + if (p !== nmLen) paths.push(from.slice(0, last) + "/node_modules"); last = i; p = 0; } else if (p !== -1) { @@ -609,7 +648,7 @@ if (isWindows) { } // Append /node_modules to handle root paths. - paths.push('/node_modules'); + paths.push("/node_modules"); return paths; }; @@ -617,23 +656,24 @@ if (isWindows) { Module._resolveLookupPaths = function(request, parent) { if (NativeModule.canBeRequiredByUsers(request)) { - debug('looking for %j in []', request); + debug("looking for %j in []", request); return null; } // Check for node modules paths. - if (request.charAt(0) !== '.' || - (request.length > 1 && - request.charAt(1) !== '.' && - request.charAt(1) !== '/' && - (!isWindows || request.charAt(1) !== '\\'))) { - + if ( + request.charAt(0) !== "." 
|| + (request.length > 1 && + request.charAt(1) !== "." && + request.charAt(1) !== "/" && + (!isWindows || request.charAt(1) !== "\\")) + ) { let paths = modulePaths; if (parent != null && parent.paths && parent.paths.length) { paths = parent.paths.concat(paths); } - debug('looking for %j in %j', request, paths); + debug("looking for %j in %j", request, paths); return paths.length > 0 ? paths : null; } @@ -641,16 +681,16 @@ Module._resolveLookupPaths = function(request, parent) { if (!parent || !parent.id || !parent.filename) { // Make require('./path/to/foo') work - normally the path is taken // from realpath(__filename) but with eval there is no filename - const mainPaths = ['.'].concat(Module._nodeModulePaths('.'), modulePaths); + const mainPaths = ["."].concat(Module._nodeModulePaths("."), modulePaths); - debug('looking for %j in %j', request, mainPaths); + debug("looking for %j in %j", request, mainPaths); return mainPaths; } - debug('RELATIVE: requested: %s from parent.id %s', request, parent.id); + debug("RELATIVE: requested: %s from parent.id %s", request, parent.id); const parentDir = [path.dirname(parent.filename)]; - debug('looking for %j', parentDir); + debug("looking for %j", parentDir); return parentDir; }; @@ -664,7 +704,7 @@ Module._resolveLookupPaths = function(request, parent) { Module._load = function(request, parent, isMain) { let relResolveCacheIdentifier; if (parent) { - debug('Module._load REQUEST %s parent: %s', request, parent.id); + debug("Module._load REQUEST %s parent: %s", request, parent.id); // Fast path for (lazy loaded) modules in the same directory. The indirect // caching is required to allow cache invalidation without changing the old // cache key names. 
@@ -696,7 +736,7 @@ Module._load = function(request, parent, isMain) { if (isMain) { process.mainModule = module; - module.id = '.'; + module.id = "."; } Module._cache[filename] = module; @@ -727,17 +767,18 @@ Module._resolveFilename = function(request, parent, isMain, options) { var paths; - if (typeof options === 'object' && options !== null) { + if (typeof options === "object" && options !== null) { if (Array.isArray(options.paths)) { - const isRelative = request.startsWith('./') || - request.startsWith('../') || - (isWindows && request.startsWith('.\\') || - request.startsWith('..\\')); + const isRelative = + request.startsWith("./") || + request.startsWith("../") || + ((isWindows && request.startsWith(".\\")) || + request.startsWith("..\\")); if (isRelative) { paths = options.paths; } else { - const fakeParent = new Module('', null); + const fakeParent = new Module("", null); paths = []; @@ -747,15 +788,14 @@ Module._resolveFilename = function(request, parent, isMain, options) { const lookupPaths = Module._resolveLookupPaths(request, fakeParent); for (var j = 0; j < lookupPaths.length; j++) { - if (!paths.includes(lookupPaths[j])) - paths.push(lookupPaths[j]); + if (!paths.includes(lookupPaths[j])) paths.push(lookupPaths[j]); } } } } else if (options.paths === undefined) { paths = Module._resolveLookupPaths(request, parent); } else { - throw new ERR_INVALID_OPT_VALUE('options.paths', options.paths); + throw new ERR_INVALID_OPT_VALUE("options.paths", options.paths); } } else { paths = Module._resolveLookupPaths(request, parent); @@ -765,28 +805,25 @@ Module._resolveFilename = function(request, parent, isMain, options) { const filename = Module._findPath(request, paths, isMain); if (!filename) { const requireStack = []; - for (var cursor = parent; - cursor; - cursor = cursor.parent) { + for (var cursor = parent; cursor; cursor = cursor.parent) { requireStack.push(cursor.filename || cursor.id); } let message = `Cannot find module '${request}'`; if 
(requireStack.length > 0) { - message = message + '\nRequire stack:\n- ' + requireStack.join('\n- '); + message = message + "\nRequire stack:\n- " + requireStack.join("\n- "); } // eslint-disable-next-line no-restricted-syntax var err = new Error(message); - err.code = 'MODULE_NOT_FOUND'; + err.code = "MODULE_NOT_FOUND"; err.requireStack = requireStack; throw err; } return filename; }; - // Given a file name, pass it to the proper extension handler. Module.prototype.load = function(filename) { - debug('load %j for module %j', filename, this.id); + debug("load %j for module %j", filename, this.id); assert(!this.loaded); this.filename = filename; @@ -802,34 +839,33 @@ Module.prototype.load = function(filename) { const module = ESMLoader.moduleMap.get(url); // Create module entry at load time to snapshot exports correctly const exports = this.exports; - if (module !== undefined) { // Called from cjs translator + if (module !== undefined) { + // Called from cjs translator if (module.reflect) { - module.reflect.onReady((reflect) => { + module.reflect.onReady(reflect => { reflect.exports.default.set(exports); }); } - } else { // preemptively cache + } else { + // preemptively cache ESMLoader.moduleMap.set( url, new ModuleJob(ESMLoader, url, async () => { - return createDynamicModule( - [], ['default'], url, (reflect) => { - reflect.exports.default.set(exports); - }); + return createDynamicModule([], ["default"], url, reflect => { + reflect.exports.default.set(exports); + }); }) ); } } }; - // Loads a module at the given file path. Returns that module's // `exports` property. 
Module.prototype.require = function(id) { - validateString(id, 'id'); - if (id === '') { - throw new ERR_INVALID_ARG_VALUE('id', id, - 'must be a non-empty string'); + validateString(id, "id"); + if (id === "") { + throw new ERR_INVALID_ARG_VALUE("id", id, "must be a non-empty string"); } requireDepth++; try { @@ -839,7 +875,6 @@ Module.prototype.require = function(id) { } }; - // Resolved path to process.argv[1] will be lazily placed here // (needed for setting breakpoint when called with --inspect-brk) var resolvedArgv; @@ -852,10 +887,12 @@ function wrapSafe(filename, content) { filename, lineOffset: 0, displayErrors: true, - importModuleDynamically: experimentalModules ? async (specifier) => { - const loader = await asyncESM.loaderPromise; - return loader.import(specifier, normalizeReferrerURL(filename)); - } : undefined, + importModuleDynamically: experimentalModules + ? async specifier => { + const loader = await asyncESM.loaderPromise; + return loader.import(specifier, normalizeReferrerURL(filename)); + } + : undefined }); } let compiled; @@ -869,13 +906,7 @@ function wrapSafe(filename, content) { false, undefined, [], - [ - 'exports', - 'require', - 'module', - '__filename', - '__dirname', - ] + ["exports", "require", "module", "__filename", "__dirname"] ); } catch (err) { if (experimentalModules) { @@ -885,9 +916,9 @@ function wrapSafe(filename, content) { } if (experimentalModules) { - const { callbackMap } = internalBinding('module_wrap'); + const { callbackMap } = internalBinding("module_wrap"); callbackMap.set(compiled.cacheKey, { - importModuleDynamically: async (specifier) => { + importModuleDynamically: async specifier => { const loader = await asyncESM.loaderPromise; return loader.import(specifier, normalizeReferrerURL(filename)); } @@ -913,20 +944,20 @@ Module.prototype._compile = function(content, filename) { const compiledWrapper = wrapSafe(filename, content); var inspectorWrapper = null; - if (getOptionValue('--inspect-brk') && process._eval == 
null) { + if (getOptionValue("--inspect-brk") && process._eval == null) { if (!resolvedArgv) { // We enter the repl if we're not given a filename argument. if (process.argv[1]) { resolvedArgv = Module._resolveFilename(process.argv[1], null, false); } else { - resolvedArgv = 'repl'; + resolvedArgv = "repl"; } } // Set breakpoint on module start if (!hasPausedEntry && filename === resolvedArgv) { hasPausedEntry = true; - inspectorWrapper = internalBinding('inspector').callAndPauseOnStart; + inspectorWrapper = internalBinding("inspector").callAndPauseOnStart; } } const dirname = path.dirname(filename); @@ -937,33 +968,44 @@ Module.prototype._compile = function(content, filename) { const module = this; if (requireDepth === 0) statCache = new Map(); if (inspectorWrapper) { - result = inspectorWrapper(compiledWrapper, thisValue, exports, - require, module, filename, dirname); + result = inspectorWrapper( + compiledWrapper, + thisValue, + exports, + require, + module, + filename, + dirname + ); } else { - result = compiledWrapper.call(thisValue, exports, require, module, - filename, dirname); + result = compiledWrapper.call( + thisValue, + exports, + require, + module, + filename, + dirname + ); } if (requireDepth === 0) statCache = null; return result; }; - // Native extension for .js -Module._extensions['.js'] = function(module, filename) { - if (filename.endsWith('.js')) { +Module._extensions[".js"] = function(module, filename) { + if (filename.endsWith(".js")) { const pkg = readPackageScope(filename); - if (pkg && pkg.type === 'module') { + if (pkg && pkg.type === "module") { throw new ERR_REQUIRE_ESM(filename); } } - const content = fs.readFileSync(filename, 'utf8'); + const content = fs.readFileSync(filename, "utf8"); module._compile(content, filename); }; - // Native extension for .json -Module._extensions['.json'] = function(module, filename) { - const content = fs.readFileSync(filename, 'utf8'); +Module._extensions[".json"] = function(module, filename) { + const 
content = fs.readFileSync(filename, "utf8"); if (manifest) { const moduleURL = pathToFileURL(filename); @@ -973,14 +1015,13 @@ Module._extensions['.json'] = function(module, filename) { try { module.exports = JSON.parse(stripBOM(content)); } catch (err) { - err.message = filename + ': ' + err.message; + err.message = filename + ": " + err.message; throw err; } }; - // Native extension for .node -Module._extensions['.node'] = function(module, filename) { +Module._extensions[".node"] = function(module, filename) { if (manifest) { const content = fs.readFileSync(filename); const moduleURL = pathToFileURL(filename); @@ -990,7 +1031,7 @@ Module._extensions['.node'] = function(module, filename) { return process.dlopen(module, path.toNamespacedPath(filename)); }; -Module._extensions['.mjs'] = function(module, filename) { +Module._extensions[".mjs"] = function(module, filename) { throw new ERR_REQUIRE_ESM(filename); }; @@ -998,15 +1039,16 @@ Module._extensions['.mjs'] = function(module, filename) { Module.runMain = function() { // Load the main module--the command line argument. if (experimentalModules) { - asyncESM.loaderPromise.then((loader) => { - return loader.import(pathToFileURL(process.argv[1]).pathname); - }) - .catch((e) => { - internalBinding('errors').triggerUncaughtException( - e, - true /* fromPromise */ - ); - }); + asyncESM.loaderPromise + .then(loader => { + return loader.import(pathToFileURL(process.argv[1]).pathname); + }) + .catch(e => { + internalBinding("errors").triggerUncaughtException( + e, + true /* fromPromise */ + ); + }); return; } Module._load(process.argv[1], null, true); @@ -1015,11 +1057,9 @@ Module.runMain = function() { function createRequireFromPath(filename) { // Allow a directory to be passed as the filename const trailingSlash = - filename.endsWith('/') || (isWindows && filename.endsWith('\\')); + filename.endsWith("/") || (isWindows && filename.endsWith("\\")); - const proxyPath = trailingSlash ? 
- path.join(filename, 'noop.js') : - filename; + const proxyPath = trailingSlash ? path.join(filename, "noop.js") : filename; const m = new Module(proxyPath); m.filename = proxyPath; @@ -1030,27 +1070,28 @@ function createRequireFromPath(filename) { Module.createRequireFromPath = deprecate( createRequireFromPath, - 'Module.createRequireFromPath() is deprecated. ' + - 'Use Module.createRequire() instead.', - 'DEP0130' + "Module.createRequireFromPath() is deprecated. " + + "Use Module.createRequire() instead.", + "DEP0130" ); -const createRequireError = 'must be a file URL object, file URL string, or ' + - 'absolute path string'; +const createRequireError = + "must be a file URL object, file URL string, or " + "absolute path string"; function createRequire(filename) { let filepath; - if (filename instanceof URL || - (typeof filename === 'string' && !path.isAbsolute(filename))) { + if ( + filename instanceof URL || + (typeof filename === "string" && !path.isAbsolute(filename)) + ) { try { filepath = fileURLToPath(filename); } catch { - throw new ERR_INVALID_ARG_VALUE('filename', filename, - createRequireError); + throw new ERR_INVALID_ARG_VALUE("filename", filename, createRequireError); } - } else if (typeof filename !== 'string') { - throw new ERR_INVALID_ARG_VALUE('filename', filename, createRequireError); + } else if (typeof filename !== "string") { + throw new ERR_INVALID_ARG_VALUE("filename", filename, createRequireError); } else { filepath = filename; } @@ -1066,8 +1107,8 @@ Module._initPaths = function() { homeDir = process.env.USERPROFILE; nodePath = process.env.NODE_PATH; } else { - homeDir = safeGetenv('HOME'); - nodePath = safeGetenv('NODE_PATH'); + homeDir = safeGetenv("HOME"); + nodePath = safeGetenv("NODE_PATH"); } // $PREFIX/lib/node, where $PREFIX is the root of the Node.js installation. @@ -1075,21 +1116,24 @@ Module._initPaths = function() { // process.execPath is $PREFIX/bin/node except on Windows where it is // $PREFIX\node.exe. 
if (isWindows) { - prefixDir = path.resolve(process.execPath, '..'); + prefixDir = path.resolve(process.execPath, ".."); } else { - prefixDir = path.resolve(process.execPath, '..', '..'); + prefixDir = path.resolve(process.execPath, "..", ".."); } - var paths = [path.resolve(prefixDir, 'lib', 'node')]; + var paths = [path.resolve(prefixDir, "lib", "node")]; if (homeDir) { - paths.unshift(path.resolve(homeDir, '.node_libraries')); - paths.unshift(path.resolve(homeDir, '.node_modules')); + paths.unshift(path.resolve(homeDir, ".node_libraries")); + paths.unshift(path.resolve(homeDir, ".node_modules")); } if (nodePath) { - paths = nodePath.split(path.delimiter).filter(function pathsFilterCB(path) { - return !!path; - }).concat(paths); + paths = nodePath + .split(path.delimiter) + .filter(function pathsFilterCB(path) { + return !!path; + }) + .concat(paths); } modulePaths = paths; @@ -1099,22 +1143,20 @@ Module._initPaths = function() { }; Module._preloadModules = function(requests) { - if (!Array.isArray(requests)) - return; + if (!Array.isArray(requests)) return; // Preloaded modules have a dummy parent module which is deemed to exist // in the current working directory. This seeds the search path for // preloaded modules. - const parent = new Module('internal/preload', null); + const parent = new Module("internal/preload", null); try { parent.paths = Module._nodeModulePaths(process.cwd()); } catch (e) { - if (e.code !== 'ENOENT') { + if (e.code !== "ENOENT") { throw e; } } - for (var n = 0; n < requests.length; n++) - parent.require(requests[n]); + for (var n = 0; n < requests.length; n++) parent.require(requests[n]); }; // Backwards compatibility @@ -1122,8 +1164,7 @@ Module.Module = Module; // We have to load the esm things after module.exports! 
if (experimentalModules) { - asyncESM = require('internal/process/esm_loader'); - ModuleJob = require('internal/modules/esm/module_job'); - createDynamicModule = require( - 'internal/modules/esm/create_dynamic_module'); + asyncESM = require("internal/process/esm_loader"); + ModuleJob = require("internal/modules/esm/module_job"); + createDynamicModule = require("internal/modules/esm/create_dynamic_module"); } diff --git a/lib/repl.js b/lib/repl.js index a6123d93896d06..49f9d06499074e 100644 --- a/lib/repl.js +++ b/lib/repl.js @@ -40,7 +40,7 @@ * repl.start("node > ").context.foo = "stdin is fun"; */ -'use strict'; +"use strict"; const { Math, Object, ObjectPrototype } = primordials; @@ -48,54 +48,43 @@ const { builtinLibs, makeRequireFunction, addBuiltinLibsToObject -} = require('internal/modules/cjs/helpers'); +} = require("internal/modules/cjs/helpers"); const { isIdentifierStart, isIdentifierChar -} = require('internal/deps/acorn/acorn/dist/acorn'); -const { - decorateErrorStack, - isError, - deprecate -} = require('internal/util'); -const { inspect } = require('internal/util/inspect'); -const Stream = require('stream'); -const vm = require('vm'); -const path = require('path'); -const fs = require('fs'); -const { Interface } = require('readline'); -const { Console } = require('console'); -const CJSModule = require('internal/modules/cjs/loader').Module; -const domain = require('domain'); -const debug = require('internal/util/debuglog').debuglog('repl'); +} = require("internal/deps/acorn/acorn/dist/acorn"); +const { decorateErrorStack, isError, deprecate } = require("internal/util"); +const { inspect } = require("internal/util/inspect"); +const Stream = require("stream"); +const vm = require("vm"); +const path = require("path"); +const fs = require("fs"); +const { Interface } = require("readline"); +const { Console } = require("console"); +const CJSModule = require("internal/modules/cjs/loader").Module; +const domain = require("domain"); +const debug = 
require("internal/util/debuglog").debuglog("repl"); const { ERR_CANNOT_WATCH_SIGINT, ERR_INVALID_ARG_TYPE, ERR_INVALID_REPL_EVAL_CONFIG, ERR_INVALID_REPL_INPUT, ERR_SCRIPT_EXECUTION_INTERRUPTED -} = require('internal/errors').codes; -const { sendInspectorCommand } = require('internal/util/inspector'); -const experimentalREPLAwait = require('internal/options').getOptionValue( - '--experimental-repl-await' +} = require("internal/errors").codes; +const { sendInspectorCommand } = require("internal/util/inspector"); +const experimentalREPLAwait = require("internal/options").getOptionValue( + "--experimental-repl-await" ); -const { - isRecoverableError, - kStandaloneREPL -} = require('internal/repl/utils'); +const { isRecoverableError, kStandaloneREPL } = require("internal/repl/utils"); const { getOwnNonIndexProperties, - propertyFilter: { - ALL_PROPERTIES, - SKIP_SYMBOLS - } -} = internalBinding('util'); -const { - startSigintWatchdog, - stopSigintWatchdog -} = internalBinding('contextify'); + propertyFilter: { ALL_PROPERTIES, SKIP_SYMBOLS } +} = internalBinding("util"); +const { startSigintWatchdog, stopSigintWatchdog } = internalBinding( + "contextify" +); -const history = require('internal/repl/history'); +const history = require("internal/repl/history"); // Lazy-loaded. let processTopLevelAwait; @@ -104,20 +93,20 @@ const parentModule = module; const replMap = new WeakMap(); const domainSet = new WeakSet(); -const kBufferedCommandSymbol = Symbol('bufferedCommand'); -const kContextId = Symbol('contextId'); +const kBufferedCommandSymbol = Symbol("bufferedCommand"); +const kContextId = Symbol("contextId"); let addedNewListener = false; try { // Hack for require.resolve("./relative") to work properly. - module.filename = path.resolve('repl'); + module.filename = path.resolve("repl"); } catch { // path.resolve('repl') fails when the current working directory has been // deleted. Fall back to the directory name of the (absolute) executable // path. 
It's not really correct but what are the alternatives? const dirname = path.dirname(process.execPath); - module.filename = path.resolve(dirname, 'repl'); + module.filename = path.resolve(dirname, "repl"); } // Hack for repl require to work properly with node_modules folders @@ -126,28 +115,32 @@ module.paths = CJSModule._nodeModulePaths(module.filename); // This is the default "writer" value, if none is passed in the REPL options, // and it can be overridden by custom print functions, such as `probe` or // `eyes.js`. -const writer = exports.writer = (obj) => inspect(obj, writer.options); +const writer = (exports.writer = obj => inspect(obj, writer.options)); writer.options = { ...inspect.defaultOptions, showProxy: true }; exports._builtinLibs = builtinLibs; -function REPLServer(prompt, - stream, - eval_, - useGlobal, - ignoreUndefined, - replMode) { +function REPLServer( + prompt, + stream, + eval_, + useGlobal, + ignoreUndefined, + replMode +) { if (!(this instanceof REPLServer)) { - return new REPLServer(prompt, - stream, - eval_, - useGlobal, - ignoreUndefined, - replMode); + return new REPLServer( + prompt, + stream, + eval_, + useGlobal, + ignoreUndefined, + replMode + ); } let options; - if (prompt !== null && typeof prompt === 'object') { + if (prompt !== null && typeof prompt === "object") { // An options object was given. options = { ...prompt }; stream = options.stream || options.socket; @@ -212,17 +205,20 @@ function REPLServer(prompt, // domains. Otherwise we'd have to add a single listener to each REPL instance // and that could trigger the `MaxListenersExceededWarning`. 
if (!options[kStandaloneREPL] && !addedNewListener) { - process.prependListener('newListener', (event, listener) => { - if (event === 'uncaughtException' && - process.domain && - listener.name !== 'domainUncaughtExceptionClear' && - domainSet.has(process.domain)) { + process.prependListener("newListener", (event, listener) => { + if ( + event === "uncaughtException" && + process.domain && + listener.name !== "domainUncaughtExceptionClear" && + domainSet.has(process.domain) + ) { // Throw an error so that the event will not be added and the current // domain takes over. That way the user is notified about the error // and the current code evaluation is stopped, just as any other code // that contains an error. throw new ERR_INVALID_REPL_INPUT( - 'Listeners for `uncaughtException` cannot be used in the REPL'); + "Listeners for `uncaughtException` cannot be used in the REPL" + ); } }); addedNewListener = true; @@ -231,20 +227,24 @@ function REPLServer(prompt, domainSet.add(this._domain); let rli = this; - Object.defineProperty(this, 'rli', { - get: deprecate(() => rli, - 'REPLServer.rli is deprecated', 'DEP0124'), - set: deprecate((val) => rli = val, - 'REPLServer.rli is deprecated', 'DEP0124'), + Object.defineProperty(this, "rli", { + get: deprecate(() => rli, "REPLServer.rli is deprecated", "DEP0124"), + set: deprecate( + val => (rli = val), + "REPLServer.rli is deprecated", + "DEP0124" + ), enumerable: true, configurable: true }); - const savedRegExMatches = ['', '', '', '', '', '', '', '', '', '']; - const sep = '\u0000\u0000\u0000'; - const regExMatcher = new RegExp(`^${sep}(.*)${sep}(.*)${sep}(.*)${sep}(.*)` + - `${sep}(.*)${sep}(.*)${sep}(.*)${sep}(.*)` + - `${sep}(.*)$`); + const savedRegExMatches = ["", "", "", "", "", "", "", "", "", ""]; + const sep = "\u0000\u0000\u0000"; + const regExMatcher = new RegExp( + `^${sep}(.*)${sep}(.*)${sep}(.*)${sep}(.*)` + + `${sep}(.*)${sep}(.*)${sep}(.*)${sep}(.*)` + + `${sep}(.*)$` + ); eval_ = eval_ || defaultEval; @@ 
-260,16 +260,16 @@ function REPLServer(prompt, if (!paused) return; paused = false; let entry; - while (entry = pausedBuffer.shift()) { + while ((entry = pausedBuffer.shift())) { const [type, payload] = entry; switch (type) { - case 'key': { + case "key": { const [d, key] = payload; self._ttyWrite(d, key); break; } - case 'close': - self.emit('exit'); + case "close": + self.emit("exit"); break; } if (paused) { @@ -279,11 +279,11 @@ function REPLServer(prompt, } function defaultEval(code, context, file, cb) { - const { getOptionValue } = require('internal/options'); - const experimentalModules = getOptionValue('--experimental-modules'); - const asyncESM = experimentalModules ? - require('internal/process/esm_loader') : - null; + const { getOptionValue } = require("internal/options"); + const experimentalModules = getOptionValue("--experimental-modules"); + const asyncESM = experimentalModules + ? require("internal/process/esm_loader") + : null; let result, script, wrappedErr; let err = null; @@ -301,9 +301,9 @@ function REPLServer(prompt, wrappedCmd = true; } - if (experimentalREPLAwait && code.includes('await')) { + if (experimentalREPLAwait && code.includes("await")) { if (processTopLevelAwait === undefined) { - ({ processTopLevelAwait } = require('internal/repl/await')); + ({ processTopLevelAwait } = require("internal/repl/await")); } const potentialWrappedCode = processTopLevelAwait(code); @@ -315,19 +315,16 @@ function REPLServer(prompt, } // First, create the Script object to check the syntax - if (code === '\n') - return cb(null); + if (code === "\n") return cb(null); let pwd; try { - const { pathToFileURL } = require('url'); + const { pathToFileURL } = require("url"); pwd = pathToFileURL(process.cwd()).href; - } catch { - } + } catch {} while (true) { try { - if (!/^\s*$/.test(code) && - self.replMode === exports.REPL_MODE_STRICT) { + if (!/^\s*$/.test(code) && self.replMode === exports.REPL_MODE_STRICT) { // "void 0" keeps the repl from returning "use 
strict" as the result // value for statements and declarations that don't return a value. code = `'use strict'; void 0;\n${code}`; @@ -335,14 +332,14 @@ function REPLServer(prompt, script = vm.createScript(code, { filename: file, displayErrors: true, - importModuleDynamically: experimentalModules ? - async (specifier) => { - return (await asyncESM.loaderPromise).import(specifier, pwd); - } : - undefined + importModuleDynamically: experimentalModules + ? async specifier => { + return (await asyncESM.loaderPromise).import(specifier, pwd); + } + : undefined }); } catch (e) { - debug('parse error %j', code, e); + debug("parse error %j", code, e); if (wrappedCmd) { // Unwrap and try again wrappedCmd = false; @@ -353,10 +350,8 @@ function REPLServer(prompt, } // Preserve original error for wrapped command const error = wrappedErr || e; - if (isRecoverableError(error, code)) - err = new Recoverable(error); - else - err = error; + if (isRecoverableError(error, code)) err = new Recoverable(error); + else err = error; } break; } @@ -385,8 +380,7 @@ function REPLServer(prompt, if (self.breakEvalOnSigint) { // Start the SIGINT watchdog before entering raw mode so that a very // quick Ctrl+C doesn't lead to aborting the process completely. - if (!startSigintWatchdog()) - throw new ERR_CANNOT_WATCH_SIGINT(); + if (!startSigintWatchdog()) throw new ERR_CANNOT_WATCH_SIGINT(); previouslyInRawMode = self._setRawMode(false); } @@ -410,7 +404,7 @@ function REPLServer(prompt, // Returns true if there were pending SIGINTs *after* the script // has terminated without being interrupted itself. 
if (stopSigintWatchdog()) { - self.emit('SIGINT'); + self.emit("SIGINT"); } } } @@ -418,8 +412,8 @@ function REPLServer(prompt, err = e; if (process.domain) { - debug('not recoverable, send to domain'); - process.domain.emit('error', err); + debug("not recoverable, send to domain"); + process.domain.emit("error", err); process.domain.exit(); return; } @@ -443,21 +437,26 @@ function REPLServer(prompt, promise = Promise.race([promise, interrupt]); } - promise.then((result) => { - finishExecution(null, result); - }, (err) => { - if (err && process.domain) { - debug('not recoverable, send to domain'); - process.domain.emit('error', err); - process.domain.exit(); - return; - } - finishExecution(err); - }).finally(() => { - // Remove prioritized SIGINT listener if it was not called. - prioritizedSigintQueue.delete(sigintListener); - unpause(); - }); + promise + .then( + result => { + finishExecution(null, result); + }, + err => { + if (err && process.domain) { + debug("not recoverable, send to domain"); + process.domain.emit("error", err); + process.domain.exit(); + return; + } + finishExecution(err); + } + ) + .finally(() => { + // Remove prioritized SIGINT listener if it was not called. + prioritizedSigintQueue.delete(sigintListener); + unpause(); + }); } } @@ -468,11 +467,11 @@ function REPLServer(prompt, self.eval = self._domain.bind(eval_); - self._domain.on('error', function debugDomainError(e) { - debug('domain error'); - let errStack = ''; + self._domain.on("error", function debugDomainError(e) { + debug("domain error"); + let errStack = ""; - if (typeof e === 'object' && e !== null) { + if (typeof e === "object" && e !== null) { const pstrace = Error.prepareStackTrace; Error.prepareStackTrace = prepareStackTrace(pstrace); decorateErrorStack(e); @@ -485,20 +484,22 @@ function REPLServer(prompt, if (isError(e)) { if (e.stack) { - if (e.name === 'SyntaxError') { + if (e.name === "SyntaxError") { // Remove stack trace. 
e.stack = e.stack - .replace(/^repl:\d+\r?\n/, '') - .replace(/^\s+at\s.*\n?/gm, ''); + .replace(/^repl:\d+\r?\n/, "") + .replace(/^\s+at\s.*\n?/gm, ""); } else if (self.replMode === exports.REPL_MODE_STRICT) { - e.stack = e.stack.replace(/(\s+at\s+repl:)(\d+)/, - (_, pre, line) => pre + (line - 1)); + e.stack = e.stack.replace( + /(\s+at\s+repl:)(\d+)/, + (_, pre, line) => pre + (line - 1) + ); } } errStack = self.writer(e); // Remove one line error braces to keep the old style in place. - if (errStack[errStack.length - 1] === ']') { + if (errStack[errStack.length - 1] === "]") { errStack = errStack.slice(1, -1); } } @@ -509,19 +510,21 @@ function REPLServer(prompt, } const top = replMap.get(self); - if (options[kStandaloneREPL] && - process.listenerCount('uncaughtException') !== 0) { + if ( + options[kStandaloneREPL] && + process.listenerCount("uncaughtException") !== 0 + ) { process.nextTick(() => { - process.emit('uncaughtException', e); + process.emit("uncaughtException", e); top.clearBufferedCommand(); top.lines.level = []; top.displayPrompt(); }); } else { - if (errStack === '') { + if (errStack === "") { errStack = `Thrown: ${self.writer(e)}\n`; } else { - const ln = errStack.endsWith('\n') ? '' : '\n'; + const ln = errStack.endsWith("\n") ? 
"" : "\n"; errStack = `Thrown:\n${errStack}${ln}`; } top.outputStream.write(errStack); @@ -535,23 +538,30 @@ function REPLServer(prompt, self.lines.level = []; self.clearBufferedCommand(); - Object.defineProperty(this, 'bufferedCommand', { - get: deprecate(() => self[kBufferedCommandSymbol], - 'REPLServer.bufferedCommand is deprecated', - 'DEP0074'), - set: deprecate((val) => self[kBufferedCommandSymbol] = val, - 'REPLServer.bufferedCommand is deprecated', - 'DEP0074'), + Object.defineProperty(this, "bufferedCommand", { + get: deprecate( + () => self[kBufferedCommandSymbol], + "REPLServer.bufferedCommand is deprecated", + "DEP0074" + ), + set: deprecate( + val => (self[kBufferedCommandSymbol] = val), + "REPLServer.bufferedCommand is deprecated", + "DEP0074" + ), enumerable: true }); // Figure out which "complete" function to use. - self.completer = (typeof options.completer === 'function') ? - options.completer : completer; + self.completer = + typeof options.completer === "function" ? options.completer : completer; function completer(text, cb) { - complete.call(self, text, self.editorMode ? - self.completeOnEditorMode(cb) : cb); + complete.call( + self, + text, + self.editorMode ? 
self.completeOnEditorMode(cb) : cb + ); } Interface.call(this, { @@ -574,13 +584,13 @@ function REPLServer(prompt, writer.options.colors = self.useColors; if (options[kStandaloneREPL]) { - Object.defineProperty(inspect, 'replDefaults', { + Object.defineProperty(inspect, "replDefaults", { get() { return writer.options; }, set(options) { - if (options === null || typeof options !== 'object') { - throw new ERR_INVALID_ARG_TYPE('options', 'Object', options); + if (options === null || typeof options !== "object") { + throw new ERR_INVALID_ARG_TYPE("options", "Object", options); } return Object.assign(writer.options, options); }, @@ -593,10 +603,10 @@ function REPLServer(prompt, function filterInternalStackFrames(structuredStack) { // Search from the bottom of the call stack to // find the first frame with a null function name - if (typeof structuredStack !== 'object') - return structuredStack; - const idx = structuredStack.reverse().findIndex( - (frame) => frame.getFunctionName() === null); + if (typeof structuredStack !== "object") return structuredStack; + const idx = structuredStack + .reverse() + .findIndex(frame => frame.getFunctionName() === null); // If found, get rid of it and everything below it structuredStack = structuredStack.splice(idx + 1); @@ -610,7 +620,7 @@ function REPLServer(prompt, return fn(error, frames); } frames.push(error); - return frames.reverse().join('\n at '); + return frames.reverse().join("\n at "); }; } @@ -625,21 +635,22 @@ function REPLServer(prompt, self.parseREPLKeyword = deprecate( _parseREPLKeyword, - 'REPLServer.parseREPLKeyword() is deprecated', - 'DEP0075'); + "REPLServer.parseREPLKeyword() is deprecated", + "DEP0075" + ); - self.on('close', function emitExit() { + self.on("close", function emitExit() { if (paused) { - pausedBuffer.push(['close']); + pausedBuffer.push(["close"]); return; } - self.emit('exit'); + self.emit("exit"); }); let sawSIGINT = false; let sawCtrlD = false; const prioritizedSigintQueue = new Set(); - 
self.on('SIGINT', function onSigInt() { + self.on("SIGINT", function onSigInt() { if (prioritizedSigintQueue.size > 0) { for (const task of prioritizedSigintQueue) { task(); @@ -658,7 +669,7 @@ function REPLServer(prompt, sawSIGINT = false; return; } - self.output.write('(To exit, press ^C again or ^D or type .exit)\n'); + self.output.write("(To exit, press ^C again or ^D or type .exit)\n"); sawSIGINT = true; } else { sawSIGINT = false; @@ -669,13 +680,13 @@ function REPLServer(prompt, self.displayPrompt(); }); - self.on('line', function onLine(cmd) { - debug('line %j', cmd); - cmd = cmd || ''; + self.on("line", function onLine(cmd) { + debug("line %j", cmd); + cmd = cmd || ""; sawSIGINT = false; if (self.editorMode) { - self[kBufferedCommandSymbol] += cmd + '\n'; + self[kBufferedCommandSymbol] += cmd + "\n"; // code alignment const matches = self._sawKeyPress ? cmd.match(/^\s+/) : null; @@ -695,8 +706,11 @@ function REPLServer(prompt, // Check to see if a REPL keyword was used. If it returns true, // display next prompt and return. if (trimmedCmd) { - if (trimmedCmd.charAt(0) === '.' && trimmedCmd.charAt(1) !== '.' && - Number.isNaN(parseFloat(trimmedCmd))) { + if ( + trimmedCmd.charAt(0) === "." && + trimmedCmd.charAt(1) !== "." 
&& + Number.isNaN(parseFloat(trimmedCmd)) + ) { const matches = trimmedCmd.match(/^\.([^\s]+)\s*(.*)$/); const keyword = matches && matches[1]; const rest = matches && matches[2]; @@ -704,26 +718,28 @@ function REPLServer(prompt, return; } if (!self[kBufferedCommandSymbol]) { - self.outputStream.write('Invalid REPL keyword\n'); + self.outputStream.write("Invalid REPL keyword\n"); finish(null); return; } } } - const evalCmd = self[kBufferedCommandSymbol] + cmd + '\n'; + const evalCmd = self[kBufferedCommandSymbol] + cmd + "\n"; - debug('eval %j', evalCmd); - self.eval(evalCmd, self.context, 'repl', finish); + debug("eval %j", evalCmd); + self.eval(evalCmd, self.context, "repl", finish); function finish(e, ret) { - debug('finish', e, ret); + debug("finish", e, ret); _memory.call(self, cmd); - if (e && !self[kBufferedCommandSymbol] && cmd.trim().startsWith('npm ')) { - self.outputStream.write('npm should be run outside of the ' + - 'node repl, in your normal shell.\n' + - '(Press Control-D to exit.)\n'); + if (e && !self[kBufferedCommandSymbol] && cmd.trim().startsWith("npm ")) { + self.outputStream.write( + "npm should be run outside of the " + + "node repl, in your normal shell.\n" + + "(Press Control-D to exit.)\n" + ); self.displayPrompt(); return; } @@ -735,11 +751,11 @@ function REPLServer(prompt, // { // ... x: 1 // ... } - self[kBufferedCommandSymbol] += cmd + '\n'; + self[kBufferedCommandSymbol] += cmd + "\n"; self.displayPrompt(); return; } else { - self._domain.emit('error', e.err || e); + self._domain.emit("error", e.err || e); } } @@ -748,16 +764,18 @@ function REPLServer(prompt, sawCtrlD = false; // If we got any output - print it (if no error) - if (!e && - // When an invalid REPL command is used, error message is printed - // immediately. We don't have to print anything else. So, only when - // the second argument to this function is there, print it. 
- arguments.length === 2 && - (!self.ignoreUndefined || ret !== undefined)) { + if ( + !e && + // When an invalid REPL command is used, error message is printed + // immediately. We don't have to print anything else. So, only when + // the second argument to this function is there, print it. + arguments.length === 2 && + (!self.ignoreUndefined || ret !== undefined) + ) { if (!self.underscoreAssigned) { self.last = ret; } - self.outputStream.write(self.writer(ret) + '\n'); + self.outputStream.write(self.writer(ret) + "\n"); } // Display prompt again @@ -765,11 +783,12 @@ function REPLServer(prompt, } }); - self.on('SIGCONT', function onSigCont() { + self.on("SIGCONT", function onSigCont() { if (self.editorMode) { self.outputStream.write(`${self._initialPrompt}.editor\n`); self.outputStream.write( - '// Entering editor mode (^D to finish, ^C to cancel)\n'); + "// Entering editor mode (^D to finish, ^C to cancel)\n" + ); self.outputStream.write(`${self[kBufferedCommandSymbol]}\n`); self.prompt(true); } else { @@ -781,14 +800,18 @@ function REPLServer(prompt, const ttyWrite = self._ttyWrite.bind(self); self._ttyWrite = (d, key) => { key = key || {}; - if (paused && !(self.breakEvalOnSigint && key.ctrl && key.name === 'c')) { - pausedBuffer.push(['key', [d, key]]); + if (paused && !(self.breakEvalOnSigint && key.ctrl && key.name === "c")) { + pausedBuffer.push(["key", [d, key]]); return; } if (!self.editorMode || !self.terminal) { // Before exiting, make sure to clear the line. 
- if (key.ctrl && key.name === 'd' && - self.cursor === 0 && self.line.length === 0) { + if ( + key.ctrl && + key.name === "d" && + self.cursor === 0 && + self.line.length === 0 + ) { self.clearLine(); } ttyWrite(d, key); @@ -798,23 +821,23 @@ function REPLServer(prompt, // Editor mode if (key.ctrl && !key.shift) { switch (key.name) { - case 'd': // End editor mode + case "d": // End editor mode _turnOffEditorMode(self); sawCtrlD = true; - ttyWrite(d, { name: 'return' }); + ttyWrite(d, { name: "return" }); break; - case 'n': // Override next history item - case 'p': // Override previous history item + case "n": // Override next history item + case "p": // Override previous history item break; default: ttyWrite(d, key); } } else { switch (key.name) { - case 'up': // Override previous history item - case 'down': // Override next history item + case "up": // Override previous history item + case "down": // Override next history item break; - case 'tab': + case "tab": // Prevent double tab behavior self._previousKey = null; ttyWrite(d, key); @@ -832,23 +855,27 @@ Object.setPrototypeOf(REPLServer, Interface); exports.REPLServer = REPLServer; -exports.REPL_MODE_SLOPPY = Symbol('repl-sloppy'); -exports.REPL_MODE_STRICT = Symbol('repl-strict'); +exports.REPL_MODE_SLOPPY = Symbol("repl-sloppy"); +exports.REPL_MODE_STRICT = Symbol("repl-strict"); // Prompt is a string to print on each line for the prompt, // source is a stream to use for I/O, defaulting to stdin/stdout. 
-exports.start = function(prompt, - source, - eval_, - useGlobal, - ignoreUndefined, - replMode) { - const repl = new REPLServer(prompt, - source, - eval_, - useGlobal, - ignoreUndefined, - replMode); +exports.start = function( + prompt, + source, + eval_, + useGlobal, + ignoreUndefined, + replMode +) { + const repl = new REPLServer( + prompt, + source, + eval_, + useGlobal, + ignoreUndefined, + replMode + ); if (!exports.repl) exports.repl = repl; replMap.set(repl, repl); return repl; @@ -859,21 +886,17 @@ REPLServer.prototype.setupHistory = function setupHistory(historyFile, cb) { }; REPLServer.prototype.clearBufferedCommand = function clearBufferedCommand() { - this[kBufferedCommandSymbol] = ''; + this[kBufferedCommandSymbol] = ""; }; REPLServer.prototype.close = function close() { if (this.terminal && this._flushing && !this._closingOnFlush) { this._closingOnFlush = true; - this.once('flushHistory', () => - Interface.prototype.close.call(this) - ); + this.once("flushHistory", () => Interface.prototype.close.call(this)); return; } - process.nextTick(() => - Interface.prototype.close.call(this) - ); + process.nextTick(() => Interface.prototype.close.call(this)); }; REPLServer.prototype.createContext = function() { @@ -881,41 +904,47 @@ REPLServer.prototype.createContext = function() { if (this.useGlobal) { context = global; } else { - sendInspectorCommand((session) => { - session.post('Runtime.enable'); - session.once('Runtime.executionContextCreated', ({ params }) => { - this[kContextId] = params.context.id; - }); - context = vm.createContext(); - session.post('Runtime.disable'); - }, () => { - context = vm.createContext(); - }); + sendInspectorCommand( + session => { + session.post("Runtime.enable"); + session.once("Runtime.executionContextCreated", ({ params }) => { + this[kContextId] = params.context.id; + }); + context = vm.createContext(); + session.post("Runtime.disable"); + }, + () => { + context = vm.createContext(); + } + ); for (const name of 
Object.getOwnPropertyNames(global)) { // Only set properties on the context that do not exist as primordial. if (!(name in primordials)) { - Object.defineProperty(context, name, - Object.getOwnPropertyDescriptor(global, name)); + Object.defineProperty( + context, + name, + Object.getOwnPropertyDescriptor(global, name) + ); } } context.global = context; const _console = new Console(this.outputStream); - Object.defineProperty(context, 'console', { + Object.defineProperty(context, "console", { configurable: true, writable: true, value: _console }); } - const module = new CJSModule(''); - module.paths = CJSModule._resolveLookupPaths('', parentModule) || []; + const module = new CJSModule(""); + module.paths = CJSModule._resolveLookupPaths("", parentModule) || []; - Object.defineProperty(context, 'module', { + Object.defineProperty(context, "module", { configurable: true, writable: true, value: module }); - Object.defineProperty(context, 'require', { + Object.defineProperty(context, "require", { configurable: true, writable: true, value: makeRequireFunction(module) @@ -933,42 +962,43 @@ REPLServer.prototype.resetContext = function() { this.lines = []; this.lines.level = []; - Object.defineProperty(this.context, '_', { + Object.defineProperty(this.context, "_", { configurable: true, get: () => this.last, - set: (value) => { + set: value => { this.last = value; if (!this.underscoreAssigned) { this.underscoreAssigned = true; - this.outputStream.write('Expression assignment to _ now disabled.\n'); + this.outputStream.write("Expression assignment to _ now disabled.\n"); } } }); - Object.defineProperty(this.context, '_error', { + Object.defineProperty(this.context, "_error", { configurable: true, get: () => this.lastError, - set: (value) => { + set: value => { this.lastError = value; if (!this.underscoreErrAssigned) { this.underscoreErrAssigned = true; this.outputStream.write( - 'Expression assignment to _error now disabled.\n'); + "Expression assignment to _error now 
disabled.\n" + ); } } }); // Allow REPL extensions to extend the new context - this.emit('reset', this.context); + this.emit("reset", this.context); }; REPLServer.prototype.displayPrompt = function(preserveCursor) { let prompt = this._initialPrompt; if (this[kBufferedCommandSymbol].length) { - prompt = '...'; + prompt = "..."; const len = this.lines.level.length ? this.lines.level.length - 1 : 0; - const levelInd = '..'.repeat(len); - prompt += levelInd + ' '; + const levelInd = "..".repeat(len); + prompt += levelInd + " "; } // Do not overwrite `_initialPrompt` here @@ -983,9 +1013,12 @@ REPLServer.prototype.setPrompt = function setPrompt(prompt) { }; REPLServer.prototype.turnOffEditorMode = deprecate( - function() { _turnOffEditorMode(this); }, - 'REPLServer.turnOffEditorMode() is deprecated', - 'DEP0078'); + function() { + _turnOffEditorMode(this); + }, + "REPLServer.turnOffEditorMode() is deprecated", + "DEP0078" +); // A stream to push an array into a REPL // used in REPLServer.complete @@ -993,8 +1026,7 @@ function ArrayStream() { Stream.call(this); this.run = function(data) { - for (let n = 0; n < data.length; n++) - this.emit('data', `${data[n]}\n`); + for (let n = 0; n < data.length; n++) this.emit("data", `${data[n]}\n`); }; } Object.setPrototypeOf(ArrayStream.prototype, Stream.prototype); @@ -1006,11 +1038,10 @@ ArrayStream.prototype.write = function() {}; const requireRE = /\brequire\s*\(['"](([\w@./-]+\/)?(?:[\w@./-]*))/; const fsAutoCompleteRE = /fs(?:\.promises)?\.\s*[a-z][a-zA-Z]+\(\s*["'](.*)/; -const simpleExpressionRE = - /(?:[a-zA-Z_$](?:\w|\$)*\.)*[a-zA-Z_$](?:\w|\$)*\.?$/; +const simpleExpressionRE = /(?:[a-zA-Z_$](?:\w|\$)*\.)*[a-zA-Z_$](?:\w|\$)*\.?$/; function isIdentifier(str) { - if (str === '') { + if (str === "") { return false; } const first = str.codePointAt(0); @@ -1037,15 +1068,22 @@ function filteredOwnPropertyNames(obj) { } function getGlobalLexicalScopeNames(contextId) { - return sendInspectorCommand((session) => { - let names = 
[]; - session.post('Runtime.globalLexicalScopeNames', { - executionContextId: contextId - }, (error, result) => { - if (!error) names = result.names; - }); - return names; - }, () => []); + return sendInspectorCommand( + session => { + let names = []; + session.post( + "Runtime.globalLexicalScopeNames", + { + executionContextId: contextId + }, + (error, result) => { + if (!error) names = result.names; + } + ); + return names; + }, + () => [] + ); } REPLServer.prototype.complete = function() { @@ -1064,25 +1102,28 @@ REPLServer.prototype.complete = function() { // getter code. function complete(line, callback) { // There may be local variables to evaluate, try a nested REPL - if (this[kBufferedCommandSymbol] !== undefined && - this[kBufferedCommandSymbol].length) { + if ( + this[kBufferedCommandSymbol] !== undefined && + this[kBufferedCommandSymbol].length + ) { // Get a new array of inputted lines const tmp = this.lines.slice(); // Kill off all function declarations to push all local variables into // global scope for (let n = 0; n < this.lines.level.length; n++) { const kill = this.lines.level[n]; - if (kill.isFunction) - tmp[kill.line] = ''; + if (kill.isFunction) tmp[kill.line] = ""; } - const flat = new ArrayStream(); // Make a new "input" stream. - const magic = new REPLServer('', flat); // Make a nested REPL. + const flat = new ArrayStream(); // Make a new "input" stream. + const magic = new REPLServer("", flat); // Make a nested REPL. replMap.set(magic, replMap.get(this)); - flat.run(tmp); // `eval` the flattened code. + flat.run(tmp); // `eval` the flattened code. // All this is only profitable if the nested REPL does not have a // bufferedCommand. 
if (!magic[kBufferedCommandSymbol]) { - magic._domain.on('error', (err) => { throw err; }); + magic._domain.on("error", err => { + throw err; + }); return magic.complete(line, callback); } } @@ -1102,24 +1143,25 @@ function complete(line, callback) { } completionGroupsLoaded(); - } else if (match = line.match(requireRE)) { + } else if ((match = line.match(requireRE))) { // require('...') const exts = Object.keys(this.context.require.extensions); - const indexRe = new RegExp('^index(?:' + exts.map(regexpEscape).join('|') + - ')$'); + const indexRe = new RegExp( + "^index(?:" + exts.map(regexpEscape).join("|") + ")$" + ); const versionedFileNamesRe = /-\d+\.\d+/; completeOn = match[1]; - const subdir = match[2] || ''; + const subdir = match[2] || ""; filter = match[1]; let dir, files, name, base, ext, abs, subfiles, isDirectory; group = []; let paths = []; - if (completeOn === '.') { - group = ['./', '../']; - } else if (completeOn === '..') { - group = ['../']; + if (completeOn === ".") { + group = ["./", "../"]; + } else if (completeOn === "..") { + group = ["../"]; } else if (/^\.\.?\//.test(completeOn)) { paths = [process.cwd()]; } else { @@ -1137,7 +1179,7 @@ function complete(line, callback) { name = files[f]; ext = path.extname(name); base = name.slice(0, -ext.length); - if (versionedFileNamesRe.test(base) || name === '.npm') { + if (versionedFileNamesRe.test(base) || name === ".npm") { // Exclude versioned names that 'npm' installs. 
continue; } @@ -1148,7 +1190,7 @@ function complete(line, callback) { continue; } if (isDirectory) { - group.push(subdir + name + '/'); + group.push(subdir + name + "/"); try { subfiles = fs.readdirSync(abs); } catch { @@ -1159,7 +1201,7 @@ function complete(line, callback) { group.push(subdir + name); } } - } else if (exts.includes(ext) && (!subdir || base !== 'index')) { + } else if (exts.includes(ext) && (!subdir || base !== "index")) { group.push(subdir + base); } } @@ -1173,55 +1215,54 @@ function complete(line, callback) { } completionGroupsLoaded(); - } else if (match = line.match(fsAutoCompleteRE)) { - + } else if ((match = line.match(fsAutoCompleteRE))) { let filePath = match[1]; let fileList; - filter = ''; + filter = ""; try { fileList = fs.readdirSync(filePath, { withFileTypes: true }); - completionGroups.push(fileList.map((dirent) => dirent.name)); - completeOn = ''; + completionGroups.push(fileList.map(dirent => dirent.name)); + completeOn = ""; } catch { try { const baseName = path.basename(filePath); filePath = path.dirname(filePath); fileList = fs.readdirSync(filePath, { withFileTypes: true }); - const filteredValue = fileList.filter((d) => - d.name.startsWith(baseName)) - .map((d) => d.name); + const filteredValue = fileList + .filter(d => d.name.startsWith(baseName)) + .map(d => d.name); completionGroups.push(filteredValue); completeOn = baseName; } catch {} } completionGroupsLoaded(); - // Handle variable member lookup. - // We support simple chained expressions like the following (no function - // calls, etc.). That is for simplicity and also because we *eval* that - // leading expression so for safety (see WARNING above) don't want to - // eval function calls. - // - // foo.bar<|> # completions for 'foo' with filter 'bar' - // spam.eggs.<|> # completions for 'spam.eggs' with filter '' - // foo<|> # all scope vars with filter 'foo' - // foo.<|> # completions for 'foo' with filter '' + // Handle variable member lookup. 
+ // We support simple chained expressions like the following (no function + // calls, etc.). That is for simplicity and also because we *eval* that + // leading expression so for safety (see WARNING above) don't want to + // eval function calls. + // + // foo.bar<|> # completions for 'foo' with filter 'bar' + // spam.eggs.<|> # completions for 'spam.eggs' with filter '' + // foo<|> # all scope vars with filter 'foo' + // foo.<|> # completions for 'foo' with filter '' } else if (line.length === 0 || /\w|\.|\$/.test(line[line.length - 1])) { match = simpleExpressionRE.exec(line); if (line.length === 0 || match) { let expr; - completeOn = (match ? match[0] : ''); + completeOn = match ? match[0] : ""; if (line.length === 0) { - filter = ''; - expr = ''; - } else if (line[line.length - 1] === '.') { - filter = ''; + filter = ""; + expr = ""; + } else if (line[line.length - 1] === ".") { + filter = ""; expr = match[0].slice(0, match[0].length - 1); } else { - const bits = match[0].split('.'); + const bits = match[0].split("."); filter = bits.pop(); - expr = bits.join('.'); + expr = bits.join("."); } // Resolve expr and get its completions. 
@@ -1232,34 +1273,36 @@ function complete(line, callback) { if (this.useGlobal || vm.isContext(this.context)) { completionGroups.push(getGlobalLexicalScopeNames(this[kContextId])); let contextProto = this.context; - while (contextProto = Object.getPrototypeOf(contextProto)) { + while ((contextProto = Object.getPrototypeOf(contextProto))) { completionGroups.push( - filteredOwnPropertyNames.call(this, contextProto)); + filteredOwnPropertyNames.call(this, contextProto) + ); } completionGroups.push( - filteredOwnPropertyNames.call(this, this.context)); - if (filter !== '') addCommonWords(completionGroups); + filteredOwnPropertyNames.call(this, this.context) + ); + if (filter !== "") addCommonWords(completionGroups); completionGroupsLoaded(); } else { - this.eval('.scope', this.context, 'repl', function ev(err, globals) { + this.eval(".scope", this.context, "repl", function ev(err, globals) { if (err || !Array.isArray(globals)) { - if (filter !== '') addCommonWords(completionGroups); + if (filter !== "") addCommonWords(completionGroups); } else if (Array.isArray(globals[0])) { // Add grouped globals for (let n = 0; n < globals.length; n++) completionGroups.push(globals[n]); } else { completionGroups.push(globals); - if (filter !== '') addCommonWords(completionGroups); + if (filter !== "") addCommonWords(completionGroups); } completionGroupsLoaded(); }); } } else { const evalExpr = `try { ${expr} } catch {}`; - this.eval(evalExpr, this.context, 'repl', (e, obj) => { + this.eval(evalExpr, this.context, "repl", (e, obj) => { if (obj != null) { - if (typeof obj === 'object' || typeof obj === 'function') { + if (typeof obj === "object" || typeof obj === "function") { try { memberGroups.push(filteredOwnPropertyNames.call(this, obj)); } catch { @@ -1273,7 +1316,7 @@ function complete(line, callback) { try { let sentinel = 5; let p; - if (typeof obj === 'object' || typeof obj === 'function') { + if (typeof obj === "object" || typeof obj === "function") { p = 
Object.getPrototypeOf(obj); } else { p = obj.constructor ? obj.constructor.prototype : null; @@ -1293,7 +1336,8 @@ function complete(line, callback) { if (memberGroups.length) { for (let i = 0; i < memberGroups.length; i++) { completionGroups.push( - memberGroups[i].map((member) => `${expr}.${member}`)); + memberGroups[i].map(member => `${expr}.${member}`) + ); } if (filter) { filter = `${expr}.${filter}`; @@ -1317,8 +1361,7 @@ function complete(line, callback) { if (completionGroups.length && filter) { const newCompletionGroups = []; for (let i = 0; i < completionGroups.length; i++) { - group = completionGroups[i] - .filter((elem) => elem.indexOf(filter) === 0); + group = completionGroups[i].filter(elem => elem.indexOf(filter) === 0); if (group.length) { newCompletionGroups.push(group); } @@ -1329,7 +1372,7 @@ function complete(line, callback) { let completions; if (completionGroups.length) { - const uniq = {}; // Unique completions across all groups + const uniq = {}; // Unique completions across all groups completions = []; // Completion group 0 is the "closest" // (least far up the inheritance chain) @@ -1344,9 +1387,9 @@ function complete(line, callback) { uniq[c] = true; } } - completions.unshift(''); // Separator btwn groups + completions.unshift(""); // Separator btwn groups } - while (completions.length && completions[0] === '') { + while (completions.length && completions[0] === "") { completions.shift(); } } @@ -1357,7 +1400,7 @@ function complete(line, callback) { function longestCommonPrefix(arr = []) { const cnt = arr.length; - if (cnt === 0) return ''; + if (cnt === 0) return ""; if (cnt === 1) return arr[0]; const first = arr[0]; @@ -1374,34 +1417,35 @@ function longestCommonPrefix(arr = []) { return first; } -REPLServer.prototype.completeOnEditorMode = (callback) => (err, results) => { +REPLServer.prototype.completeOnEditorMode = callback => (err, results) => { if (err) return callback(err); - const [completions, completeOn = ''] = results; + const 
[completions, completeOn = ""] = results; const prefixLength = completeOn.length; if (prefixLength === 0) return callback(null, [[], completeOn]); - const isNotEmpty = (v) => v.length > 0; - const trimCompleteOnPrefix = (v) => v.substring(prefixLength); + const isNotEmpty = v => v.length > 0; + const trimCompleteOnPrefix = v => v.substring(prefixLength); const data = completions.filter(isNotEmpty).map(trimCompleteOnPrefix); callback(null, [[`${completeOn}${longestCommonPrefix(data)}`], completeOn]); }; REPLServer.prototype.defineCommand = function(keyword, cmd) { - if (typeof cmd === 'function') { + if (typeof cmd === "function") { cmd = { action: cmd }; - } else if (typeof cmd.action !== 'function') { - throw new ERR_INVALID_ARG_TYPE('action', 'Function', cmd.action); + } else if (typeof cmd.action !== "function") { + throw new ERR_INVALID_ARG_TYPE("action", "Function", cmd.action); } this.commands[keyword] = cmd; }; REPLServer.prototype.memory = deprecate( _memory, - 'REPLServer.memory() is deprecated', - 'DEP0082'); + "REPLServer.memory() is deprecated", + "DEP0082" +); function _memory(cmd) { const self = this; @@ -1412,10 +1456,10 @@ function _memory(cmd) { if (cmd) { // TODO should I tab the level? const len = self.lines.level.length ? self.lines.level.length - 1 : 0; - self.lines.push(' '.repeat(len) + cmd); + self.lines.push(" ".repeat(len) + cmd); } else { // I don't want to not change the format too much... - self.lines.push(''); + self.lines.push(""); } // I need to know "depth." @@ -1461,7 +1505,7 @@ function _memory(cmd) { } } } - }()); + })(); } // It is possible to determine a syntax error at this point. @@ -1480,17 +1524,48 @@ function addCommonWords(completionGroups) { // Only words which do not yet exist as global property should be added to // this list. 
completionGroups.push([ - 'async', 'await', 'break', 'case', 'catch', 'const', 'continue', - 'debugger', 'default', 'delete', 'do', 'else', 'export', 'false', - 'finally', 'for', 'function', 'if', 'import', 'in', 'instanceof', 'let', - 'new', 'null', 'return', 'switch', 'this', 'throw', 'true', 'try', - 'typeof', 'var', 'void', 'while', 'with', 'yield' + "async", + "await", + "break", + "case", + "catch", + "const", + "continue", + "debugger", + "default", + "delete", + "do", + "else", + "export", + "false", + "finally", + "for", + "function", + "if", + "import", + "in", + "instanceof", + "let", + "new", + "null", + "return", + "switch", + "this", + "throw", + "true", + "try", + "typeof", + "var", + "void", + "while", + "with", + "yield" ]); } function _turnOnEditorMode(repl) { repl.editorMode = true; - Interface.prototype.setPrompt.call(repl, ''); + Interface.prototype.setPrompt.call(repl, ""); } function _turnOffEditorMode(repl) { @@ -1499,8 +1574,8 @@ function _turnOffEditorMode(repl) { } function defineDefaultCommands(repl) { - repl.defineCommand('break', { - help: 'Sometimes you get stuck, this gets you out', + repl.defineCommand("break", { + help: "Sometimes you get stuck, this gets you out", action: function() { this.clearBufferedCommand(); this.displayPrompt(); @@ -1509,31 +1584,31 @@ function defineDefaultCommands(repl) { let clearMessage; if (repl.useGlobal) { - clearMessage = 'Alias for .break'; + clearMessage = "Alias for .break"; } else { - clearMessage = 'Break, and also clear the local context'; + clearMessage = "Break, and also clear the local context"; } - repl.defineCommand('clear', { + repl.defineCommand("clear", { help: clearMessage, action: function() { this.clearBufferedCommand(); if (!this.useGlobal) { - this.outputStream.write('Clearing context...\n'); + this.outputStream.write("Clearing context...\n"); this.resetContext(); } this.displayPrompt(); } }); - repl.defineCommand('exit', { - help: 'Exit the repl', + repl.defineCommand("exit", { + 
help: "Exit the repl", action: function() { this.close(); } }); - repl.defineCommand('help', { - help: 'Print this help message', + repl.defineCommand("help", { + help: "Print this help message", action: function() { const names = Object.keys(this.commands).sort(); const longestNameLength = names.reduce( @@ -1543,21 +1618,22 @@ function defineDefaultCommands(repl) { for (let n = 0; n < names.length; n++) { const name = names[n]; const cmd = this.commands[name]; - const spaces = ' '.repeat(longestNameLength - name.length + 3); - const line = `.${name}${cmd.help ? spaces + cmd.help : ''}\n`; + const spaces = " ".repeat(longestNameLength - name.length + 3); + const line = `.${name}${cmd.help ? spaces + cmd.help : ""}\n`; this.outputStream.write(line); } - this.outputStream.write('\nPress ^C to abort current expression, ' + - '^D to exit the repl\n'); + this.outputStream.write( + "\nPress ^C to abort current expression, " + "^D to exit the repl\n" + ); this.displayPrompt(); } }); - repl.defineCommand('save', { - help: 'Save all evaluated commands in this REPL session to a file', + repl.defineCommand("save", { + help: "Save all evaluated commands in this REPL session to a file", action: function(file) { try { - fs.writeFileSync(file, this.lines.join('\n')); + fs.writeFileSync(file, this.lines.join("\n")); this.outputStream.write(`Session saved to: ${file}\n`); } catch { this.outputStream.write(`Failed to save: ${file}\n`); @@ -1566,17 +1642,17 @@ function defineDefaultCommands(repl) { } }); - repl.defineCommand('load', { - help: 'Load JS from a file into the REPL session', + repl.defineCommand("load", { + help: "Load JS from a file into the REPL session", action: function(file) { try { const stats = fs.statSync(file); if (stats && stats.isFile()) { _turnOnEditorMode(this); - const data = fs.readFileSync(file, 'utf8'); + const data = fs.readFileSync(file, "utf8"); this.write(data); _turnOffEditorMode(this); - this.write('\n'); + this.write("\n"); } else { 
this.outputStream.write( `Failed to load: ${file} is not a valid file\n` @@ -1589,19 +1665,20 @@ function defineDefaultCommands(repl) { } }); if (repl.terminal) { - repl.defineCommand('editor', { - help: 'Enter editor mode', + repl.defineCommand("editor", { + help: "Enter editor mode", action() { _turnOnEditorMode(this); this.outputStream.write( - '// Entering editor mode (^D to finish, ^C to cancel)\n'); + "// Entering editor mode (^D to finish, ^C to cancel)\n" + ); } }); } } function regexpEscape(s) { - return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&'); + return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&"); } function Recoverable(err) { diff --git a/src/node_internals.h b/src/node_internals.h index 0f4c32bb941a31..bb5077eda9862d 100644 --- a/src/node_internals.h +++ b/src/node_internals.h @@ -71,18 +71,18 @@ v8::Local AddressToJS( template void GetSockOrPeerName(const v8::FunctionCallbackInfo& args) { - T* wrap; - ASSIGN_OR_RETURN_UNWRAP(&wrap, - args.Holder(), - args.GetReturnValue().Set(UV_EBADF)); - CHECK(args[0]->IsObject()); - sockaddr_storage storage; - int addrlen = sizeof(storage); - sockaddr* const addr = reinterpret_cast(&storage); - const int err = F(&wrap->handle_, addr, &addrlen); - if (err == 0) - AddressToJS(wrap->env(), addr, args[0].As()); - args.GetReturnValue().Set(err); + T* wrap; + ASSIGN_OR_RETURN_UNWRAP(&wrap, + args.Holder(), + args.GetReturnValue().Set(UV_EBADF)); + CHECK(args[0]->IsObject()); + sockaddr_storage storage; + int addrlen = sizeof(storage); + sockaddr* const addr = reinterpret_cast(&storage); + const int err = F(&wrap->handle_, addr, &addrlen); + if (err == 0) + AddressToJS(wrap->env(), addr, args[0].As()); + args.GetReturnValue().Set(err); } void PrintStackTrace(v8::Isolate* isolate, v8::Local stack); @@ -106,41 +106,49 @@ void PromiseRejectCallback(v8::PromiseRejectMessage message); } // namespace task_queue class NodeArrayBufferAllocator : public ArrayBufferAllocator { - public: - inline uint32_t* zero_fill_field() { 
return &zero_fill_field_; } - - void* Allocate(size_t size) override; // Defined in src/node.cc - void* AllocateUninitialized(size_t size) override - { return node::UncheckedMalloc(size); } - void Free(void* data, size_t) override { free(data); } - virtual void* Reallocate(void* data, size_t old_size, size_t size) { - return static_cast( - UncheckedRealloc(static_cast(data), size)); - } - virtual void RegisterPointer(void* data, size_t size) {} - virtual void UnregisterPointer(void* data, size_t size) {} - - NodeArrayBufferAllocator* GetImpl() final { return this; } - - private: - uint32_t zero_fill_field_ = 1; // Boolean but exposed as uint32 to JS land. +public: + inline uint32_t* zero_fill_field() { + return &zero_fill_field_; + } + + void* Allocate(size_t size) override; // Defined in src/node.cc + void* AllocateUninitialized(size_t size) override + { + return node::UncheckedMalloc(size); + } + void Free(void* data, size_t) override { + free(data); + } + virtual void* Reallocate(void* data, size_t old_size, size_t size) { + return static_cast( + UncheckedRealloc(static_cast(data), size)); + } + virtual void RegisterPointer(void* data, size_t size) {} + virtual void UnregisterPointer(void* data, size_t size) {} + + NodeArrayBufferAllocator* GetImpl() final { + return this; + } + +private: + uint32_t zero_fill_field_ = 1; // Boolean but exposed as uint32 to JS land. 
}; class DebuggingArrayBufferAllocator final : public NodeArrayBufferAllocator { - public: - ~DebuggingArrayBufferAllocator() override; - void* Allocate(size_t size) override; - void* AllocateUninitialized(size_t size) override; - void Free(void* data, size_t size) override; - void* Reallocate(void* data, size_t old_size, size_t size) override; - void RegisterPointer(void* data, size_t size) override; - void UnregisterPointer(void* data, size_t size) override; - - private: - void RegisterPointerInternal(void* data, size_t size); - void UnregisterPointerInternal(void* data, size_t size); - Mutex mutex_; - std::unordered_map allocations_; +public: + ~DebuggingArrayBufferAllocator() override; + void* Allocate(size_t size) override; + void* AllocateUninitialized(size_t size) override; + void Free(void* data, size_t size) override; + void* Reallocate(void* data, size_t old_size, size_t size) override; + void RegisterPointer(void* data, size_t size) override; + void UnregisterPointer(void* data, size_t size) override; + +private: + void RegisterPointerInternal(void* data, size_t size); + void UnregisterPointerInternal(void* data, size_t size); + Mutex mutex_; + std::unordered_map allocations_; }; namespace Buffer { @@ -170,22 +178,22 @@ v8::MaybeLocal New(Environment* env, template static v8::MaybeLocal New(Environment* env, MaybeStackBuffer* buf) { - v8::MaybeLocal ret; - char* src = reinterpret_cast(buf->out()); - const size_t len_in_bytes = buf->length() * sizeof(buf->out()[0]); + v8::MaybeLocal ret; + char* src = reinterpret_cast(buf->out()); + const size_t len_in_bytes = buf->length() * sizeof(buf->out()[0]); - if (buf->IsAllocated()) - ret = New(env, src, len_in_bytes, true); - else if (!buf->IsInvalidated()) - ret = Copy(env, src, len_in_bytes); + if (buf->IsAllocated()) + ret = New(env, src, len_in_bytes, true); + else if (!buf->IsInvalidated()) + ret = Copy(env, src, len_in_bytes); - if (ret.IsEmpty()) - return ret; + if (ret.IsEmpty()) + return ret; - if 
(buf->IsAllocated()) - buf->Release(); + if (buf->IsAllocated()) + buf->Release(); - return ret; + return ret; } } // namespace Buffer @@ -198,61 +206,65 @@ v8::MaybeLocal InternalMakeCallback( async_context asyncContext); class InternalCallbackScope { - public: - // Tell the constructor whether its `object` parameter may be empty or not. - enum ResourceExpectation { kRequireResource, kAllowEmptyResource }; - InternalCallbackScope(Environment* env, - v8::Local object, - const async_context& asyncContext, - ResourceExpectation expect = kRequireResource); - // Utility that can be used by AsyncWrap classes. - explicit InternalCallbackScope(AsyncWrap* async_wrap); - ~InternalCallbackScope(); - void Close(); - - inline bool Failed() const { return failed_; } - inline void MarkAsFailed() { failed_ = true; } - - private: - Environment* env_; - async_context async_context_; - v8::Local object_; - AsyncCallbackScope callback_scope_; - bool failed_ = false; - bool pushed_ids_ = false; - bool closed_ = false; +public: + // Tell the constructor whether its `object` parameter may be empty or not. + enum ResourceExpectation { kRequireResource, kAllowEmptyResource }; + InternalCallbackScope(Environment* env, + v8::Local object, + const async_context& asyncContext, + ResourceExpectation expect = kRequireResource); + // Utility that can be used by AsyncWrap classes. 
+ explicit InternalCallbackScope(AsyncWrap* async_wrap); + ~InternalCallbackScope(); + void Close(); + + inline bool Failed() const { + return failed_; + } + inline void MarkAsFailed() { + failed_ = true; + } + +private: + Environment* env_; + async_context async_context_; + v8::Local object_; + AsyncCallbackScope callback_scope_; + bool failed_ = false; + bool pushed_ids_ = false; + bool closed_ = false; }; class DebugSealHandleScope { - public: - explicit inline DebugSealHandleScope(v8::Isolate* isolate) +public: + explicit inline DebugSealHandleScope(v8::Isolate* isolate) #ifdef DEBUG - : actual_scope_(isolate) + : actual_scope_(isolate) #endif - {} + {} - private: +private: #ifdef DEBUG - v8::SealHandleScope actual_scope_; + v8::SealHandleScope actual_scope_; #endif }; class ThreadPoolWork { - public: - explicit inline ThreadPoolWork(Environment* env) : env_(env) { - CHECK_NOT_NULL(env); - } - inline virtual ~ThreadPoolWork() = default; +public: + explicit inline ThreadPoolWork(Environment* env) : env_(env) { + CHECK_NOT_NULL(env); + } + inline virtual ~ThreadPoolWork() = default; - inline void ScheduleWork(); - inline int CancelWork(); + inline void ScheduleWork(); + inline int CancelWork(); - virtual void DoThreadPoolWork() = 0; - virtual void AfterThreadPoolWork(int status) = 0; + virtual void DoThreadPoolWork() = 0; + virtual void AfterThreadPoolWork(int status) = 0; - private: - Environment* env_; - uv_work_t work_req_; +private: + Environment* env_; + uv_work_t work_req_; }; #define TRACING_CATEGORY_NODE "node" @@ -279,7 +291,7 @@ v8::Isolate* NewIsolate(v8::Isolate::CreateParams* params, uv_loop_t* event_loop, MultiIsolatePlatform* platform); v8::MaybeLocal StartExecution(Environment* env, - const char* main_script_id); + const char* main_script_id); v8::MaybeLocal GetPerContextExports(v8::Local context); v8::MaybeLocal ExecuteBootstrapper( Environment* env, @@ -289,10 +301,10 @@ v8::MaybeLocal ExecuteBootstrapper( void MarkBootstrapComplete(const 
v8::FunctionCallbackInfo& args); struct InitializationResult { - int exit_code = 0; - std::vector args; - std::vector exec_args; - bool early_return = false; + int exit_code = 0; + std::vector args; + std::vector exec_args; + bool early_return = false; }; InitializationResult InitializeOncePerProcess(int argc, char** argv); void TearDownOncePerProcess(); @@ -320,43 +332,43 @@ int WriteFileSync(v8::Isolate* isolate, v8::Local string); class DiagnosticFilename { - public: - static void LocalTime(TIME_TYPE* tm_struct); +public: + static void LocalTime(TIME_TYPE* tm_struct); - inline DiagnosticFilename(Environment* env, - const char* prefix, - const char* ext); + inline DiagnosticFilename(Environment* env, + const char* prefix, + const char* ext); - inline DiagnosticFilename(uint64_t thread_id, - const char* prefix, - const char* ext); + inline DiagnosticFilename(uint64_t thread_id, + const char* prefix, + const char* ext); - inline const char* operator*() const; + inline const char* operator*() const; - private: - static std::string MakeFilename( - uint64_t thread_id, - const char* prefix, - const char* ext); +private: + static std::string MakeFilename( + uint64_t thread_id, + const char* prefix, + const char* ext); - std::string filename_; + std::string filename_; }; class TraceEventScope { - public: - TraceEventScope(const char* category, - const char* name, - void* id) : category_(category), name_(name), id_(id) { - TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_, name_, id_); - } - ~TraceEventScope() { - TRACE_EVENT_NESTABLE_ASYNC_END0(category_, name_, id_); - } - - private: - const char* category_; - const char* name_; - void* id_; +public: + TraceEventScope(const char* category, + const char* name, + void* id) : category_(category), name_(name), id_(id) { + TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_, name_, id_); + } + ~TraceEventScope() { + TRACE_EVENT_NESTABLE_ASYNC_END0(category_, name_, id_); + } + +private: + const char* category_; + const char* name_; + 
void* id_; }; } // namespace node diff --git a/src/node_metadata.h b/src/node_metadata.h index bf7e5d3ff4e811..d1ae5bffd178bb 100644 --- a/src/node_metadata.h +++ b/src/node_metadata.h @@ -54,48 +54,48 @@ namespace node { NODE_VERSIONS_KEY_INTL(V) class Metadata { - public: - Metadata(); - Metadata(Metadata&) = delete; - Metadata(Metadata&&) = delete; - Metadata operator=(Metadata&) = delete; - Metadata operator=(Metadata&&) = delete; +public: + Metadata(); + Metadata(Metadata&) = delete; + Metadata(Metadata&&) = delete; + Metadata operator=(Metadata&) = delete; + Metadata operator=(Metadata&&) = delete; - struct Versions { - Versions(); + struct Versions { + Versions(); #ifdef NODE_HAVE_I18N_SUPPORT - // Must be called on the main thread after - // i18n::InitializeICUDirectory() - void InitializeIntlVersions(); + // Must be called on the main thread after + // i18n::InitializeICUDirectory() + void InitializeIntlVersions(); #endif // NODE_HAVE_I18N_SUPPORT #define V(key) std::string key; - NODE_VERSIONS_KEYS(V) + NODE_VERSIONS_KEYS(V) #undef V - }; + }; - struct Release { - Release(); + struct Release { + Release(); - std::string name; + std::string name; #if NODE_VERSION_IS_LTS - std::string lts; + std::string lts; #endif // NODE_VERSION_IS_LTS #ifdef NODE_HAS_RELEASE_URLS - std::string source_url; - std::string headers_url; + std::string source_url; + std::string headers_url; #ifdef _WIN32 - std::string lib_url; + std::string lib_url; #endif // _WIN32 #endif // NODE_HAS_RELEASE_URLS - }; + }; - Versions versions; - const Release release; - const std::string arch; - const std::string platform; + Versions versions; + const Release release; + const std::string arch; + const std::string platform; }; // Per-process global diff --git a/src/node_options.h b/src/node_options.h index 161f9e76a02fd1..b7886bc43364f6 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -14,45 +14,51 @@ namespace node { class HostPort { - public: - HostPort(const std::string& 
host_name, int port) - : host_name_(host_name), port_(port) {} - HostPort(const HostPort&) = default; - HostPort& operator=(const HostPort&) = default; - HostPort(HostPort&&) = default; - HostPort& operator=(HostPort&&) = default; - - void set_host(const std::string& host) { host_name_ = host; } - - void set_port(int port) { port_ = port; } - - const std::string& host() const { return host_name_; } - - int port() const { - // TODO(joyeecheung): make port a uint16_t - CHECK_GE(port_, 0); - return port_; - } - - void Update(const HostPort& other) { - if (!other.host_name_.empty()) host_name_ = other.host_name_; - if (other.port_ >= 0) port_ = other.port_; - } - - private: - std::string host_name_; - int port_; +public: + HostPort(const std::string& host_name, int port) + : host_name_(host_name), port_(port) {} + HostPort(const HostPort&) = default; + HostPort& operator=(const HostPort&) = default; + HostPort(HostPort&&) = default; + HostPort& operator=(HostPort&&) = default; + + void set_host(const std::string& host) { + host_name_ = host; + } + + void set_port(int port) { + port_ = port; + } + + const std::string& host() const { + return host_name_; + } + + int port() const { + // TODO(joyeecheung): make port a uint16_t + CHECK_GE(port_, 0); + return port_; + } + + void Update(const HostPort& other) { + if (!other.host_name_.empty()) host_name_ = other.host_name_; + if (other.port_ >= 0) port_ = other.port_; + } + +private: + std::string host_name_; + int port_; }; class Options { - public: - virtual void CheckOptions(std::vector* errors) {} - virtual ~Options() = default; +public: + virtual void CheckOptions(std::vector* errors) {} + virtual ~Options() = default; }; struct InspectPublishUid { - bool console; - bool http; + bool console; + bool http; }; // These options are currently essentially per-Environment, but it can be nice @@ -60,178 +66,178 @@ struct InspectPublishUid { // specific part of Node. 
It might also make more sense for them to be // per-Isolate, rather than per-Environment. class DebugOptions : public Options { - public: - DebugOptions() = default; - DebugOptions(const DebugOptions&) = default; - DebugOptions& operator=(const DebugOptions&) = default; - DebugOptions(DebugOptions&&) = default; - DebugOptions& operator=(DebugOptions&&) = default; - - // --inspect - bool inspector_enabled = false; - // --debug - bool deprecated_debug = false; - // --inspect-brk - bool break_first_line = false; - // --inspect-brk-node - bool break_node_first_line = false; - // --inspect-publish-uid - std::string inspect_publish_uid_string = "stderr,http"; - - InspectPublishUid inspect_publish_uid; - - enum { kDefaultInspectorPort = 9229 }; - - HostPort host_port{"127.0.0.1", kDefaultInspectorPort}; - - // Used to patch the options as if --inspect-brk is passed. - void EnableBreakFirstLine() { - inspector_enabled = true; - break_first_line = true; - } - - bool wait_for_connect() const { - return break_first_line || break_node_first_line; - } - - void CheckOptions(std::vector* errors) override; +public: + DebugOptions() = default; + DebugOptions(const DebugOptions&) = default; + DebugOptions& operator=(const DebugOptions&) = default; + DebugOptions(DebugOptions&&) = default; + DebugOptions& operator=(DebugOptions&&) = default; + + // --inspect + bool inspector_enabled = false; + // --debug + bool deprecated_debug = false; + // --inspect-brk + bool break_first_line = false; + // --inspect-brk-node + bool break_node_first_line = false; + // --inspect-publish-uid + std::string inspect_publish_uid_string = "stderr,http"; + + InspectPublishUid inspect_publish_uid; + + enum { kDefaultInspectorPort = 9229 }; + + HostPort host_port{"127.0.0.1", kDefaultInspectorPort}; + + // Used to patch the options as if --inspect-brk is passed. 
+ void EnableBreakFirstLine() { + inspector_enabled = true; + break_first_line = true; + } + + bool wait_for_connect() const { + return break_first_line || break_node_first_line; + } + + void CheckOptions(std::vector* errors) override; }; class EnvironmentOptions : public Options { - public: - bool abort_on_uncaught_exception = false; - bool experimental_exports = false; - bool experimental_modules = false; - std::string es_module_specifier_resolution; - bool experimental_wasm_modules = false; - std::string module_type; - std::string experimental_policy; - std::string experimental_policy_integrity; - bool has_policy_integrity_string; - bool experimental_repl_await = false; - bool experimental_vm_modules = false; - bool expose_internals = false; - bool frozen_intrinsics = false; - std::string heap_snapshot_signal; - bool no_deprecation = false; - bool no_force_async_hooks_checks = false; - bool no_warnings = false; - bool pending_deprecation = false; - bool preserve_symlinks = false; - bool preserve_symlinks_main = false; - bool prof_process = false; +public: + bool abort_on_uncaught_exception = false; + bool experimental_exports = false; + bool experimental_modules = false; + std::string es_module_specifier_resolution; + bool experimental_wasm_modules = false; + std::string module_type; + std::string experimental_policy; + std::string experimental_policy_integrity; + bool has_policy_integrity_string; + bool experimental_repl_await = false; + bool experimental_vm_modules = false; + bool expose_internals = false; + bool frozen_intrinsics = false; + std::string heap_snapshot_signal; + bool no_deprecation = false; + bool no_force_async_hooks_checks = false; + bool no_warnings = false; + bool pending_deprecation = false; + bool preserve_symlinks = false; + bool preserve_symlinks_main = false; + bool prof_process = false; #if HAVE_INSPECTOR - std::string cpu_prof_dir; - static const uint64_t kDefaultCpuProfInterval = 1000; - uint64_t cpu_prof_interval = 
kDefaultCpuProfInterval; - std::string cpu_prof_name; - bool cpu_prof = false; - std::string heap_prof_dir; - std::string heap_prof_name; - static const uint64_t kDefaultHeapProfInterval = 512 * 1024; - uint64_t heap_prof_interval = kDefaultHeapProfInterval; - bool heap_prof = false; + std::string cpu_prof_dir; + static const uint64_t kDefaultCpuProfInterval = 1000; + uint64_t cpu_prof_interval = kDefaultCpuProfInterval; + std::string cpu_prof_name; + bool cpu_prof = false; + std::string heap_prof_dir; + std::string heap_prof_name; + static const uint64_t kDefaultHeapProfInterval = 512 * 1024; + uint64_t heap_prof_interval = kDefaultHeapProfInterval; + bool heap_prof = false; #endif // HAVE_INSPECTOR - std::string redirect_warnings; - bool throw_deprecation = false; - bool trace_deprecation = false; - bool trace_sync_io = false; - bool trace_tls = false; - bool trace_warnings = false; - std::string unhandled_rejections; - std::string userland_loader; - - bool syntax_check_only = false; - bool has_eval_string = false; + std::string redirect_warnings; + bool throw_deprecation = false; + bool trace_deprecation = false; + bool trace_sync_io = false; + bool trace_tls = false; + bool trace_warnings = false; + std::string unhandled_rejections; + std::string userland_loader; + + bool syntax_check_only = false; + bool has_eval_string = false; #ifdef NODE_REPORT - bool experimental_report = false; + bool experimental_report = false; #endif // NODE_REPORT - std::string eval_string; - bool print_eval = false; - bool force_repl = false; + std::string eval_string; + bool print_eval = false; + bool force_repl = false; - bool tls_min_v1_0 = false; - bool tls_min_v1_1 = false; - bool tls_min_v1_2 = false; - bool tls_min_v1_3 = false; - bool tls_max_v1_2 = false; - bool tls_max_v1_3 = false; + bool tls_min_v1_0 = false; + bool tls_min_v1_1 = false; + bool tls_min_v1_2 = false; + bool tls_min_v1_3 = false; + bool tls_max_v1_2 = false; + bool tls_max_v1_3 = false; - std::vector 
preload_modules; + std::vector preload_modules; - std::vector user_argv; + std::vector user_argv; - inline DebugOptions* get_debug_options(); - inline const DebugOptions& debug_options() const; - void CheckOptions(std::vector* errors) override; + inline DebugOptions* get_debug_options(); + inline const DebugOptions& debug_options() const; + void CheckOptions(std::vector* errors) override; - private: - DebugOptions debug_options_; +private: + DebugOptions debug_options_; }; class PerIsolateOptions : public Options { - public: - std::shared_ptr per_env { new EnvironmentOptions() }; - bool track_heap_objects = false; - bool no_node_snapshot = false; +public: + std::shared_ptr per_env { new EnvironmentOptions() }; + bool track_heap_objects = false; + bool no_node_snapshot = false; #ifdef NODE_REPORT - bool report_uncaught_exception = false; - bool report_on_signal = false; - bool report_on_fatalerror = false; - std::string report_signal; - std::string report_filename; - std::string report_directory; + bool report_uncaught_exception = false; + bool report_on_signal = false; + bool report_on_fatalerror = false; + std::string report_signal; + std::string report_filename; + std::string report_directory; #endif // NODE_REPORT - inline EnvironmentOptions* get_per_env_options(); - void CheckOptions(std::vector* errors) override; + inline EnvironmentOptions* get_per_env_options(); + void CheckOptions(std::vector* errors) override; }; class PerProcessOptions : public Options { - public: - std::shared_ptr per_isolate { new PerIsolateOptions() }; - - std::string title; - std::string trace_event_categories; - std::string trace_event_file_pattern = "node_trace.${rotation}.log"; - uint64_t max_http_header_size = 8 * 1024; - int64_t v8_thread_pool_size = 4; - bool zero_fill_all_buffers = false; - bool debug_arraybuffer_allocations = false; - - std::vector security_reverts; - bool print_bash_completion = false; - bool print_help = false; - bool print_v8_help = false; - bool 
print_version = false; +public: + std::shared_ptr per_isolate { new PerIsolateOptions() }; + + std::string title; + std::string trace_event_categories; + std::string trace_event_file_pattern = "node_trace.${rotation}.log"; + uint64_t max_http_header_size = 8 * 1024; + int64_t v8_thread_pool_size = 4; + bool zero_fill_all_buffers = false; + bool debug_arraybuffer_allocations = false; + + std::vector security_reverts; + bool print_bash_completion = false; + bool print_help = false; + bool print_v8_help = false; + bool print_version = false; #ifdef NODE_HAVE_I18N_SUPPORT - std::string icu_data_dir; + std::string icu_data_dir; #endif - // TODO(addaleax): Some of these could probably be per-Environment. + // TODO(addaleax): Some of these could probably be per-Environment. #if HAVE_OPENSSL - std::string openssl_config; - std::string tls_cipher_list = DEFAULT_CIPHER_LIST_CORE; + std::string openssl_config; + std::string tls_cipher_list = DEFAULT_CIPHER_LIST_CORE; #ifdef NODE_OPENSSL_CERT_STORE - bool ssl_openssl_cert_store = true; + bool ssl_openssl_cert_store = true; #else - bool ssl_openssl_cert_store = false; + bool ssl_openssl_cert_store = false; #endif - bool use_openssl_ca = false; - bool use_bundled_ca = false; + bool use_openssl_ca = false; + bool use_bundled_ca = false; #if NODE_FIPS_MODE - bool enable_fips_crypto = false; - bool force_fips_crypto = false; + bool enable_fips_crypto = false; + bool force_fips_crypto = false; #endif #endif #ifdef NODE_REPORT - std::vector cmdline; + std::vector cmdline; #endif // NODE_REPORT - inline PerIsolateOptions* get_per_isolate_options(); - void CheckOptions(std::vector* errors) override; + inline PerIsolateOptions* get_per_isolate_options(); + void CheckOptions(std::vector* errors) override; }; // The actual options parser, as opposed to the structs containing them: @@ -239,204 +245,204 @@ class PerProcessOptions : public Options { namespace options_parser { HostPort SplitHostPort(const std::string& arg, - std::vector* 
errors); + std::vector* errors); void GetOptions(const v8::FunctionCallbackInfo& args); enum OptionEnvvarSettings { - kAllowedInEnvironment, - kDisallowedInEnvironment + kAllowedInEnvironment, + kDisallowedInEnvironment }; enum OptionType { - kNoOp, - kV8Option, - kBoolean, - kInteger, - kUInteger, - kString, - kHostPort, - kStringList, + kNoOp, + kV8Option, + kBoolean, + kInteger, + kUInteger, + kString, + kHostPort, + kStringList, }; template class OptionsParser { - public: - virtual ~OptionsParser() = default; - - typedef Options TargetType; - - struct NoOp {}; - struct V8Option {}; - - // These methods add a single option to the parser. Optionally, it can be - // specified whether the option should be allowed from environment variable - // sources (i.e. NODE_OPTIONS). - void AddOption(const char* name, - const char* help_text, - bool Options::* field, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - uint64_t Options::* field, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - int64_t Options::* field, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - std::string Options::* field, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - std::vector Options::* field, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - HostPort Options::* field, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - NoOp no_op_tag, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - void AddOption(const char* name, - const char* help_text, - V8Option v8_option_tag, - OptionEnvvarSettings env_setting = kDisallowedInEnvironment); - - 
// Adds aliases. An alias can be of the form "--option-a" -> "--option-b", - // or have a more complex group expansion, like - // "--option-a" -> { "--option-b", "--harmony-foobar", "--eval", "42" } - // If `from` has the form "--option-a=", the alias will only be expanded if - // the option is presented in that form (i.e. with a '='). - // If `from` has the form "--option-a ", the alias will only be expanded - // if the option has a non-option argument (not starting with -) following it. - void AddAlias(const char* from, const char* to); - void AddAlias(const char* from, const std::vector& to); - void AddAlias(const char* from, - const std::initializer_list& to); - - // Add implications from some arbitrary option to a boolean one, either - // in a way that makes `from` set `to` to true or to false. - void Implies(const char* from, const char* to); - void ImpliesNot(const char* from, const char* to); - - // Insert options from another options parser into this one, along with - // a method that yields the target options type from this parser's options - // type. - template - void Insert(const OptionsParser& child_options_parser, - ChildOptions* (Options::* get_child)()); - - // Parse a sequence of options into an options struct, a list of - // arguments that were parsed as options, a list of unknown/JS engine options, - // and leave the remainder in the input `args` vector. - // - // For example, an `args` input of - // - // node --foo --harmony-bar --fizzle=42 -- /path/to/cow moo - // - // expands as - // - // - `args` -> { "node", "/path/to/cow", "moo" } - // - `exec_args` -> { "--foo", "--harmony-bar", "--fizzle=42" } - // - `v8_args` -> `{ "node", "--harmony-bar" } - // - `options->foo == true`, `options->fizzle == 42`. - // - // If `*error` is set, the result of the parsing should be discarded and the - // contents of any of the argument vectors should be considered undefined. 
- void Parse(std::vector* const args, - std::vector* const exec_args, - std::vector* const v8_args, - Options* const options, - OptionEnvvarSettings required_env_settings, - std::vector* const errors) const; - - private: - // We support the wide variety of different option types by remembering - // how to access them, given a certain `Options` struct. - - // Represents a field within `Options`. - class BaseOptionField { - public: - virtual ~BaseOptionField() = default; - virtual void* LookupImpl(Options* options) const = 0; +public: + virtual ~OptionsParser() = default; + + typedef Options TargetType; + + struct NoOp {}; + struct V8Option {}; + + // These methods add a single option to the parser. Optionally, it can be + // specified whether the option should be allowed from environment variable + // sources (i.e. NODE_OPTIONS). + void AddOption(const char* name, + const char* help_text, + bool Options::* field, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + uint64_t Options::* field, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + int64_t Options::* field, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + std::string Options::* field, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + std::vector Options::* field, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + HostPort Options::* field, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + NoOp no_op_tag, + OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + void AddOption(const char* name, + const char* help_text, + V8Option v8_option_tag, + 
OptionEnvvarSettings env_setting = kDisallowedInEnvironment); + + // Adds aliases. An alias can be of the form "--option-a" -> "--option-b", + // or have a more complex group expansion, like + // "--option-a" -> { "--option-b", "--harmony-foobar", "--eval", "42" } + // If `from` has the form "--option-a=", the alias will only be expanded if + // the option is presented in that form (i.e. with a '='). + // If `from` has the form "--option-a ", the alias will only be expanded + // if the option has a non-option argument (not starting with -) following it. + void AddAlias(const char* from, const char* to); + void AddAlias(const char* from, const std::vector& to); + void AddAlias(const char* from, + const std::initializer_list& to); + + // Add implications from some arbitrary option to a boolean one, either + // in a way that makes `from` set `to` to true or to false. + void Implies(const char* from, const char* to); + void ImpliesNot(const char* from, const char* to); + + // Insert options from another options parser into this one, along with + // a method that yields the target options type from this parser's options + // type. + template + void Insert(const OptionsParser& child_options_parser, + ChildOptions* (Options::* get_child)()); + + // Parse a sequence of options into an options struct, a list of + // arguments that were parsed as options, a list of unknown/JS engine options, + // and leave the remainder in the input `args` vector. + // + // For example, an `args` input of + // + // node --foo --harmony-bar --fizzle=42 -- /path/to/cow moo + // + // expands as + // + // - `args` -> { "node", "/path/to/cow", "moo" } + // - `exec_args` -> { "--foo", "--harmony-bar", "--fizzle=42" } + // - `v8_args` -> `{ "node", "--harmony-bar" } + // - `options->foo == true`, `options->fizzle == 42`. + // + // If `*error` is set, the result of the parsing should be discarded and the + // contents of any of the argument vectors should be considered undefined. 
+ void Parse(std::vector* const args, + std::vector* const exec_args, + std::vector* const v8_args, + Options* const options, + OptionEnvvarSettings required_env_settings, + std::vector* const errors) const; + +private: + // We support the wide variety of different option types by remembering + // how to access them, given a certain `Options` struct. + + // Represents a field within `Options`. + class BaseOptionField { + public: + virtual ~BaseOptionField() = default; + virtual void* LookupImpl(Options* options) const = 0; + + template + inline T* Lookup(Options* options) const { + return static_cast(LookupImpl(options)); + } + }; + + // Represents a field of type T within `Options` that can be looked up + // as a C++ member field. + template + class SimpleOptionField : public BaseOptionField { + public: + explicit SimpleOptionField(T Options::* field) : field_(field) {} + void* LookupImpl(Options* options) const override { + return static_cast(&(options->*field_)); + } + + private: + T Options::* field_; + }; template - inline T* Lookup(Options* options) const { - return static_cast(LookupImpl(options)); - } - }; - - // Represents a field of type T within `Options` that can be looked up - // as a C++ member field. - template - class SimpleOptionField : public BaseOptionField { - public: - explicit SimpleOptionField(T Options::* field) : field_(field) {} - void* LookupImpl(Options* options) const override { - return static_cast(&(options->*field_)); + inline T* Lookup(std::shared_ptr field, + Options* options) const { + return field->template Lookup(options); } - private: - T Options::* field_; - }; - - template - inline T* Lookup(std::shared_ptr field, - Options* options) const { - return field->template Lookup(options); - } - - // An option consists of: - // - A type. - // - A way to store/access the property value. - // - The information of whether it may occur in an env var or not. 
- struct OptionInfo { - OptionType type; - std::shared_ptr field; - OptionEnvvarSettings env_setting; - std::string help_text; - }; - - // An implied option is composed of the information on where to store a - // specific boolean value (if another specific option is encountered). - struct Implication { - std::shared_ptr target_field; - bool target_value; - }; - - // These are helpers that make `Insert()` support properties of other - // options structs, if we know how to access them. - template - static auto Convert( - std::shared_ptr original, - ChildOptions* (Options::* get_child)()); - template - static auto Convert( - typename OptionsParser::OptionInfo original, - ChildOptions* (Options::* get_child)()); - template - static auto Convert( - typename OptionsParser::Implication original, - ChildOptions* (Options::* get_child)()); - - std::unordered_map options_; - std::unordered_map> aliases_; - std::unordered_multimap implications_; - - template - friend class OptionsParser; - - friend void GetOptions(const v8::FunctionCallbackInfo& args); + // An option consists of: + // - A type. + // - A way to store/access the property value. + // - The information of whether it may occur in an env var or not. + struct OptionInfo { + OptionType type; + std::shared_ptr field; + OptionEnvvarSettings env_setting; + std::string help_text; + }; + + // An implied option is composed of the information on where to store a + // specific boolean value (if another specific option is encountered). + struct Implication { + std::shared_ptr target_field; + bool target_value; + }; + + // These are helpers that make `Insert()` support properties of other + // options structs, if we know how to access them. 
+ template + static auto Convert( + std::shared_ptr original, + ChildOptions* (Options::* get_child)()); + template + static auto Convert( + typename OptionsParser::OptionInfo original, + ChildOptions* (Options::* get_child)()); + template + static auto Convert( + typename OptionsParser::Implication original, + ChildOptions* (Options::* get_child)()); + + std::unordered_map options_; + std::unordered_map> aliases_; + std::unordered_multimap implications_; + + template + friend class OptionsParser; + + friend void GetOptions(const v8::FunctionCallbackInfo& args); }; using StringVector = std::vector; template void Parse( - StringVector* const args, StringVector* const exec_args, - StringVector* const v8_args, OptionsType* const options, - OptionEnvvarSettings required_env_settings, StringVector* const errors); + StringVector* const args, StringVector* const exec_args, + StringVector* const v8_args, OptionsType* const options, + OptionEnvvarSettings required_env_settings, StringVector* const errors); } // namespace options_parser diff --git a/test/benchmark/test-benchmark-process.js b/test/benchmark/test-benchmark-process.js index a73fc075bfcfa6..e366d037617cac 100644 --- a/test/benchmark/test-benchmark-process.js +++ b/test/benchmark/test-benchmark-process.js @@ -1,12 +1,9 @@ -'use strict'; +"use strict"; -require('../common'); +require("../common"); -const runBenchmark = require('../common/benchmark'); +const runBenchmark = require("../common/benchmark"); -runBenchmark('process', - [ - 'n=1', - 'type=raw', - 'operation=enumerate', - ], { NODEJS_BENCHMARK_ZERO_ALLOWED: 1 }); +runBenchmark("process", ["n=1", "type=raw", "operation=enumerate"], { + NODEJS_BENCHMARK_ZERO_ALLOWED: 1 +}); diff --git a/test/es-module/test-esm-type-flag-errors.js b/test/es-module/test-esm-type-flag-errors.js index a54a018ad9b774..fc0aaa7da9cdbf 100644 --- a/test/es-module/test-esm-type-flag-errors.js +++ b/test/es-module/test-esm-type-flag-errors.js @@ -1,52 +1,68 @@ -'use strict'; -const 
common = require('../common'); -const assert = require('assert'); -const exec = require('child_process').execFile; +"use strict"; +const common = require("../common"); +const assert = require("assert"); +const exec = require("child_process").execFile; -const mjsFile = require.resolve('../fixtures/es-modules/mjs-file.mjs'); -const cjsFile = require.resolve('../fixtures/es-modules/cjs-file.cjs'); -const packageWithoutTypeMain = - require.resolve('../fixtures/es-modules/package-without-type/index.js'); -const packageTypeCommonJsMain = - require.resolve('../fixtures/es-modules/package-type-commonjs/index.js'); -const packageTypeModuleMain = - require.resolve('../fixtures/es-modules/package-type-module/index.js'); +const mjsFile = require.resolve("../fixtures/es-modules/mjs-file.mjs"); +const cjsFile = require.resolve("../fixtures/es-modules/cjs-file.cjs"); +const packageWithoutTypeMain = require.resolve( + "../fixtures/es-modules/package-without-type/index.js" +); +const packageTypeCommonJsMain = require.resolve( + "../fixtures/es-modules/package-type-commonjs/index.js" +); +const packageTypeModuleMain = require.resolve( + "../fixtures/es-modules/package-type-module/index.js" +); // Check that running `node` without options works -expect('', mjsFile, '.mjs file'); -expect('', cjsFile, '.cjs file'); -expect('', packageTypeModuleMain, 'package-type-module'); -expect('', packageTypeCommonJsMain, 'package-type-commonjs'); -expect('', packageWithoutTypeMain, 'package-without-type'); +expect("", mjsFile, ".mjs file"); +expect("", cjsFile, ".cjs file"); +expect("", packageTypeModuleMain, "package-type-module"); +expect("", packageTypeCommonJsMain, "package-type-commonjs"); +expect("", packageWithoutTypeMain, "package-without-type"); // Check that --input-type isn't allowed for files -expect('--input-type=module', packageTypeModuleMain, - 'ERR_INPUT_TYPE_NOT_ALLOWED', true); +expect( + "--input-type=module", + packageTypeModuleMain, + "ERR_INPUT_TYPE_NOT_ALLOWED", + true +); 
try { - require('../fixtures/es-modules/package-type-module/index.js'); - assert.fail('Expected CJS to fail loading from type: module package.'); + require("../fixtures/es-modules/package-type-module/index.js"); + assert.fail("Expected CJS to fail loading from type: module package."); } catch (e) { - assert(e.toString().match(/Error \[ERR_REQUIRE_ESM\]: Must use import to load ES Module:/)); + assert( + e + .toString() + .match(/Error \[ERR_REQUIRE_ESM\]: Must use import to load ES Module:/) + ); } -function expect(opt = '', inputFile, want, wantsError = false) { +function expect(opt = "", inputFile, want, wantsError = false) { // TODO: Remove when --experimental-modules is unflagged opt = `--experimental-modules ${opt}`; const argv = [inputFile]; const opts = { env: Object.assign({}, process.env, { NODE_OPTIONS: opt }), - maxBuffer: 1e6, + maxBuffer: 1e6 }; - exec(process.execPath, argv, opts, common.mustCall((err, stdout, stderr) => { - if (wantsError) { - stdout = stderr; - } else { - assert.ifError(err); - } - if (stdout.includes(want)) return; + exec( + process.execPath, + argv, + opts, + common.mustCall((err, stdout, stderr) => { + if (wantsError) { + stdout = stderr; + } else { + assert.ifError(err); + } + if (stdout.includes(want)) return; - const o = JSON.stringify(opt); - assert.fail(`For ${o}, failed to find ${want} in: <\n${stdout}\n>`); - })); + const o = JSON.stringify(opt); + assert.fail(`For ${o}, failed to find ${want} in: <\n${stdout}\n>`); + }) + ); } diff --git a/test/parallel/test-atomics-wake.js b/test/parallel/test-atomics-wake.js index 0f387001764da2..2c0933673794ae 100644 --- a/test/parallel/test-atomics-wake.js +++ b/test/parallel/test-atomics-wake.js @@ -1,7 +1,7 @@ -'use strict'; +"use strict"; -require('../common'); -const assert = require('assert'); +require("../common"); +const assert = require("assert"); // https://github.com/nodejs/node/issues/21219 assert.strictEqual(Atomics.wake, undefined); diff --git 
a/test/parallel/test-http-parser-lazy-loaded.js b/test/parallel/test-http-parser-lazy-loaded.js index 3c4a7e7ce9f35d..6f5c81467d44d8 100644 --- a/test/parallel/test-http-parser-lazy-loaded.js +++ b/test/parallel/test-http-parser-lazy-loaded.js @@ -1,8 +1,8 @@ // Flags: --expose-internals -'use strict'; -const common = require('../common'); -const { internalBinding } = require('internal/test/binding'); +"use strict"; +const common = require("../common"); +const { internalBinding } = require("internal/test/binding"); // Monkey patch before requiring anything class DummyParser { @@ -15,12 +15,12 @@ class DummyParser { } DummyParser.REQUEST = Symbol(); -const binding = internalBinding('http_parser'); +const binding = internalBinding("http_parser"); binding.HTTPParser = DummyParser; -const assert = require('assert'); -const { spawn } = require('child_process'); -const { parsers } = require('_http_common'); +const assert = require("assert"); +const { spawn } = require("child_process"); +const { parsers } = require("_http_common"); // Test _http_common was not loaded before monkey patching const parser = parsers.alloc(); @@ -28,15 +28,20 @@ parser.initialize(DummyParser.REQUEST, {}); assert.strictEqual(parser instanceof DummyParser, true); assert.strictEqual(parser.test_type, DummyParser.REQUEST); -if (process.argv[2] !== 'child') { +if (process.argv[2] !== "child") { // Also test in a child process with IPC (specific case of https://github.com/nodejs/node/issues/23716) - const child = spawn(process.execPath, [ - '--expose-internals', __filename, 'child' - ], { - stdio: ['inherit', 'inherit', 'inherit', 'ipc'] - }); - child.on('exit', common.mustCall((code, signal) => { - assert.strictEqual(code, 0); - assert.strictEqual(signal, null); - })); + const child = spawn( + process.execPath, + ["--expose-internals", __filename, "child"], + { + stdio: ["inherit", "inherit", "inherit", "ipc"] + } + ); + child.on( + "exit", + common.mustCall((code, signal) => { + 
assert.strictEqual(code, 0); + assert.strictEqual(signal, null); + }) + ); } diff --git a/test/parallel/test-net-connect-buffer.js b/test/parallel/test-net-connect-buffer.js index 749eee519904f5..1f0ca341cafd75 100644 --- a/test/parallel/test-net-connect-buffer.js +++ b/test/parallel/test-net-connect-buffer.js @@ -19,59 +19,76 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. -'use strict'; -const common = require('../common'); -const assert = require('assert'); -const net = require('net'); +"use strict"; +const common = require("../common"); +const assert = require("assert"); +const net = require("net"); -const tcp = net.Server(common.mustCall((s) => { - tcp.close(); +const tcp = net.Server( + common.mustCall(s => { + tcp.close(); - let buf = ''; - s.setEncoding('utf8'); - s.on('data', function(d) { - buf += d; - }); + let buf = ""; + s.setEncoding("utf8"); + s.on("data", function(d) { + buf += d; + }); - s.on('end', common.mustCall(function() { - console.error('SERVER: end', buf); - assert.strictEqual(buf, "L'État, c'est moi"); - s.end(); - })); -})); + s.on( + "end", + common.mustCall(function() { + console.error("SERVER: end", buf); + assert.strictEqual(buf, "L'État, c'est moi"); + s.end(); + }) + ); + }) +); -tcp.listen(0, common.mustCall(function() { - const socket = net.Stream({ highWaterMark: 0 }); +tcp.listen( + 0, + common.mustCall(function() { + const socket = net.Stream({ highWaterMark: 0 }); - let connected = false; - assert.strictEqual(socket.pending, true); - socket.connect(this.address().port, common.mustCall(() => connected = true)); + let connected = false; + assert.strictEqual(socket.pending, true); + socket.connect( + this.address().port, + common.mustCall(() => (connected = true)) + ); - assert.strictEqual(socket.pending, true); - assert.strictEqual(socket.connecting, true); - assert.strictEqual(socket.readyState, 'opening'); + assert.strictEqual(socket.pending, true); + 
assert.strictEqual(socket.connecting, true); + assert.strictEqual(socket.readyState, "opening"); - // Write a string that contains a multi-byte character sequence to test that - // `bytesWritten` is incremented with the # of bytes, not # of characters. - const a = "L'État, c'est "; - const b = 'moi'; + // Write a string that contains a multi-byte character sequence to test that + // `bytesWritten` is incremented with the # of bytes, not # of characters. + const a = "L'État, c'est "; + const b = "moi"; - // We're still connecting at this point so the datagram is first pushed onto - // the connect queue. Make sure that it's not added to `bytesWritten` again - // when the actual write happens. - const r = socket.write(a, common.mustCall((er) => { - console.error('write cb'); - assert.ok(connected); - assert.strictEqual(socket.bytesWritten, Buffer.from(a + b).length); - assert.strictEqual(socket.pending, false); - })); - socket.on('close', common.mustCall(() => { - assert.strictEqual(socket.pending, true); - })); + // We're still connecting at this point so the datagram is first pushed onto + // the connect queue. Make sure that it's not added to `bytesWritten` again + // when the actual write happens. 
+ const r = socket.write( + a, + common.mustCall(er => { + console.error("write cb"); + assert.ok(connected); + assert.strictEqual(socket.bytesWritten, Buffer.from(a + b).length); + assert.strictEqual(socket.pending, false); + }) + ); + socket.on( + "close", + common.mustCall(() => { + assert.strictEqual(socket.pending, true); + }) + ); - assert.strictEqual(socket.bytesWritten, Buffer.from(a).length); - assert.strictEqual(r, false); - socket.end(b); + assert.strictEqual(socket.bytesWritten, Buffer.from(a).length); + assert.strictEqual(r, false); + socket.end(b); - assert.strictEqual(socket.readyState, 'opening'); -})); + assert.strictEqual(socket.readyState, "opening"); + }) +); diff --git a/test/parallel/test-net-socket-write-error.js b/test/parallel/test-net-socket-write-error.js index 178cebe9903cfb..ad23ebf4e81d17 100644 --- a/test/parallel/test-net-socket-write-error.js +++ b/test/parallel/test-net-socket-write-error.js @@ -1,19 +1,19 @@ -'use strict'; +"use strict"; -const common = require('../common'); -const net = require('net'); +const common = require("../common"); +const net = require("net"); const server = net.createServer().listen(0, connectToServer); function connectToServer() { - const client = net.createConnection(this.address().port, () => { - common.expectsError(() => client.write(1337), - { - code: 'ERR_INVALID_ARG_TYPE', - type: TypeError - }); + const client = net + .createConnection(this.address().port, () => { + common.expectsError(() => client.write(1337), { + code: "ERR_INVALID_ARG_TYPE", + type: TypeError + }); - client.destroy(); - }) - .on('close', () => server.close()); + client.destroy(); + }) + .on("close", () => server.close()); } diff --git a/test/parallel/test-net-write-arguments.js b/test/parallel/test-net-write-arguments.js index 19b037ee0fc94c..2459a067bfa295 100644 --- a/test/parallel/test-net-write-arguments.js +++ b/test/parallel/test-net-write-arguments.js @@ -1,33 +1,28 @@ -'use strict'; -const common = 
require('../common'); -const net = require('net'); +"use strict"; +const common = require("../common"); +const net = require("net"); const socket = net.Stream({ highWaterMark: 0 }); // Make sure that anything besides a buffer or a string throws. -common.expectsError(() => socket.write(null), - { - code: 'ERR_STREAM_NULL_VALUES', - type: TypeError, - message: 'May not write null values to stream' - }); -[ - true, - false, - undefined, - 1, - 1.0, - +Infinity, - -Infinity, - [], - {} -].forEach((value) => { - // We need to check the callback since 'error' will only - // be emitted once per instance. - socket.write(value, common.expectsError({ - code: 'ERR_INVALID_ARG_TYPE', - type: TypeError, - message: 'The "chunk" argument must be one of type string or Buffer. ' + - `Received type ${typeof value}` - })); +common.expectsError(() => socket.write(null), { + code: "ERR_STREAM_NULL_VALUES", + type: TypeError, + message: "May not write null values to stream" }); +[true, false, undefined, 1, 1.0, +Infinity, -Infinity, [], {}].forEach( + value => { + // We need to check the callback since 'error' will only + // be emitted once per instance. + socket.write( + value, + common.expectsError({ + code: "ERR_INVALID_ARG_TYPE", + type: TypeError, + message: + 'The "chunk" argument must be one of type string or Buffer. 
' + + `Received type ${typeof value}` + }) + ); + } +); diff --git a/test/parallel/test-policy-integrity.js b/test/parallel/test-policy-integrity.js index 2cc1f1280c8059..af470a153ca7b6 100644 --- a/test/parallel/test-policy-integrity.js +++ b/test/parallel/test-policy-integrity.js @@ -1,39 +1,39 @@ -'use strict'; +"use strict"; -const common = require('../common'); -if (!common.hasCrypto) - common.skip('missing crypto'); +const common = require("../common"); +if (!common.hasCrypto) common.skip("missing crypto"); -const tmpdir = require('../common/tmpdir'); -const assert = require('assert'); -const { spawnSync } = require('child_process'); -const crypto = require('crypto'); -const fs = require('fs'); -const path = require('path'); -const { pathToFileURL } = require('url'); +const tmpdir = require("../common/tmpdir"); +const assert = require("assert"); +const { spawnSync } = require("child_process"); +const crypto = require("crypto"); +const fs = require("fs"); +const path = require("path"); +const { pathToFileURL } = require("url"); tmpdir.refresh(); function hash(algo, body) { const h = crypto.createHash(algo); h.update(body); - return h.digest('base64'); + return h.digest("base64"); } -const policyFilepath = path.join(tmpdir.path, 'policy'); +const policyFilepath = path.join(tmpdir.path, "policy"); -const packageFilepath = path.join(tmpdir.path, 'package.json'); +const packageFilepath = path.join(tmpdir.path, "package.json"); const packageURL = pathToFileURL(packageFilepath); const packageBody = '{"main": "dep.js"}'; -const policyToPackageRelativeURLString = `./${ - path.relative(path.dirname(policyFilepath), packageFilepath) -}`; +const policyToPackageRelativeURLString = `./${path.relative( + path.dirname(policyFilepath), + packageFilepath +)}`; -const parentFilepath = path.join(tmpdir.path, 'parent.js'); +const parentFilepath = path.join(tmpdir.path, "parent.js"); const parentURL = pathToFileURL(parentFilepath); -const parentBody = 'require(\'./dep.js\')'; 
+const parentBody = "require('./dep.js')"; -const workerSpawningFilepath = path.join(tmpdir.path, 'worker_spawner.js'); +const workerSpawningFilepath = path.join(tmpdir.path, "worker_spawner.js"); const workerSpawningURL = pathToFileURL(workerSpawningFilepath); const workerSpawningBody = ` const { Worker } = require('worker_threads'); @@ -44,19 +44,20 @@ const w = new Worker(${JSON.stringify(parentFilepath)}); w.on('exit', process.exit); `; -const depFilepath = path.join(tmpdir.path, 'dep.js'); +const depFilepath = path.join(tmpdir.path, "dep.js"); const depURL = pathToFileURL(depFilepath); -const depBody = ''; -const policyToDepRelativeURLString = `./${ - path.relative(path.dirname(policyFilepath), depFilepath) -}`; +const depBody = ""; +const policyToDepRelativeURLString = `./${path.relative( + path.dirname(policyFilepath), + depFilepath +)}`; fs.writeFileSync(parentFilepath, parentBody); fs.writeFileSync(depFilepath, depBody); const tmpdirURL = pathToFileURL(tmpdir.path); -if (!tmpdirURL.pathname.endsWith('/')) { - tmpdirURL.pathname += '/'; +if (!tmpdirURL.pathname.endsWith("/")) { + tmpdirURL.pathname += "/"; } function test({ shouldFail = false, @@ -71,15 +72,16 @@ function test({ }; for (const [url, { body, match }] of Object.entries(resources)) { manifest.resources[url] = { - integrity: `sha256-${hash('sha256', match ? body : body + '\n')}`, + integrity: `sha256-${hash("sha256", match ? 
body : body + "\n")}`, dependencies: true }; fs.writeFileSync(new URL(url, tmpdirURL.href), body); } fs.writeFileSync(policyFilepath, JSON.stringify(manifest, null, 2)); const { status } = spawnSync(process.execPath, [ - '--experimental-policy', policyFilepath, - ...preload.map((m) => ['-r', m]).flat(), + "--experimental-policy", + policyFilepath, + ...preload.map(m => ["-r", m]).flat(), entry ]); if (shouldFail) { @@ -90,56 +92,64 @@ function test({ } { - const { status } = spawnSync(process.execPath, [ - '--experimental-policy', policyFilepath, - '--experimental-policy', policyFilepath - ], { - stdio: 'pipe' - }); - assert.notStrictEqual(status, 0, 'Should not allow multiple policies'); + const { status } = spawnSync( + process.execPath, + [ + "--experimental-policy", + policyFilepath, + "--experimental-policy", + policyFilepath + ], + { + stdio: "pipe" + } + ); + assert.notStrictEqual(status, 0, "Should not allow multiple policies"); } { - const enoentFilepath = path.join(tmpdir.path, 'enoent'); - try { fs.unlinkSync(enoentFilepath); } catch {} - const { status } = spawnSync(process.execPath, [ - '--experimental-policy', enoentFilepath, '-e', '' - ], { - stdio: 'pipe' - }); - assert.notStrictEqual(status, 0, 'Should not allow missing policies'); + const enoentFilepath = path.join(tmpdir.path, "enoent"); + try { + fs.unlinkSync(enoentFilepath); + } catch {} + const { status } = spawnSync( + process.execPath, + ["--experimental-policy", enoentFilepath, "-e", ""], + { + stdio: "pipe" + } + ); + assert.notStrictEqual(status, 0, "Should not allow missing policies"); } test({ shouldFail: true, entry: parentFilepath, - resources: { - } + resources: {} }); test({ shouldFail: false, entry: parentFilepath, - onerror: 'log', + onerror: "log" }); test({ shouldFail: true, entry: parentFilepath, - onerror: 'exit', + onerror: "exit" }); test({ shouldFail: true, entry: parentFilepath, - onerror: 'throw', + onerror: "throw" }); test({ shouldFail: true, entry: parentFilepath, - 
onerror: 'unknown-onerror-value', + onerror: "unknown-onerror-value" }); test({ shouldFail: true, entry: path.dirname(packageFilepath), - resources: { - } + resources: {} }); test({ shouldFail: true, @@ -147,22 +157,22 @@ test({ resources: { [depURL]: { body: depBody, - match: true, + match: true } } }); test({ shouldFail: false, entry: path.dirname(packageFilepath), - onerror: 'log', + onerror: "log", resources: { [packageURL]: { body: packageBody, - match: false, + match: false }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -172,11 +182,11 @@ test({ resources: { [packageURL]: { body: packageBody, - match: false, + match: false }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -186,11 +196,11 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: false, + match: false } } }); @@ -200,11 +210,11 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -214,15 +224,15 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [parentURL]: { body: parentBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -233,15 +243,15 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [parentURL]: { body: parentBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -251,11 +261,11 @@ test({ resources: { [parentURL]: { body: parentBody, - match: false, + match: false }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -265,11 +275,11 @@ test({ resources: { [parentURL]: { body: parentBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: false, + match: false } } }); @@ -279,7 +289,7 @@ test({ resources: { [parentURL]: { body: parentBody, - match: true, + match: true 
} } }); @@ -289,11 +299,11 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -303,11 +313,11 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [policyToDepRelativeURLString]: { body: depBody, - match: true, + match: true } } }); @@ -317,7 +327,7 @@ test({ resources: { [policyToDepRelativeURLString]: { body: depBody, - match: false, + match: false } } }); @@ -327,15 +337,15 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [policyToDepRelativeURLString]: { body: depBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -345,15 +355,15 @@ test({ resources: { [policyToPackageRelativeURLString]: { body: packageBody, - match: true, + match: true }, [packageURL]: { body: packageBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: false, + match: false } } }); @@ -363,8 +373,8 @@ test({ resources: { [workerSpawningURL]: { body: workerSpawningBody, - match: true, - }, + match: true + } } }); test({ @@ -373,19 +383,19 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [workerSpawningURL]: { body: workerSpawningBody, - match: true, + match: true }, [parentURL]: { body: parentBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); @@ -396,19 +406,19 @@ test({ resources: { [packageURL]: { body: packageBody, - match: true, + match: true }, [workerSpawningURL]: { body: workerSpawningBody, - match: true, + match: true }, [parentURL]: { body: parentBody, - match: true, + match: true }, [depURL]: { body: depBody, - match: true, + match: true } } }); diff --git a/test/parallel/test-policy-parse-integrity.js b/test/parallel/test-policy-parse-integrity.js index 6fa95416390693..d0f620cc7f1bce 100644 --- a/test/parallel/test-policy-parse-integrity.js 
+++ b/test/parallel/test-policy-parse-integrity.js @@ -1,42 +1,42 @@ -'use strict'; +"use strict"; -const common = require('../common'); -if (!common.hasCrypto) common.skip('missing crypto'); +const common = require("../common"); +if (!common.hasCrypto) common.skip("missing crypto"); -const tmpdir = require('../common/tmpdir'); -const assert = require('assert'); -const { spawnSync } = require('child_process'); -const crypto = require('crypto'); -const fs = require('fs'); -const path = require('path'); -const { pathToFileURL } = require('url'); +const tmpdir = require("../common/tmpdir"); +const assert = require("assert"); +const { spawnSync } = require("child_process"); +const crypto = require("crypto"); +const fs = require("fs"); +const path = require("path"); +const { pathToFileURL } = require("url"); tmpdir.refresh(); function hash(algo, body) { const h = crypto.createHash(algo); h.update(body); - return h.digest('base64'); + return h.digest("base64"); } -const policyFilepath = path.join(tmpdir.path, 'policy'); +const policyFilepath = path.join(tmpdir.path, "policy"); -const parentFilepath = path.join(tmpdir.path, 'parent.js'); +const parentFilepath = path.join(tmpdir.path, "parent.js"); const parentBody = "require('./dep.js')"; -const depFilepath = path.join(tmpdir.path, 'dep.js'); +const depFilepath = path.join(tmpdir.path, "dep.js"); const depURL = pathToFileURL(depFilepath); -const depBody = ''; +const depBody = ""; fs.writeFileSync(parentFilepath, parentBody); fs.writeFileSync(depFilepath, depBody); const tmpdirURL = pathToFileURL(tmpdir.path); -if (!tmpdirURL.pathname.endsWith('/')) { - tmpdirURL.pathname += '/'; +if (!tmpdirURL.pathname.endsWith("/")) { + tmpdirURL.pathname += "/"; } -const packageFilepath = path.join(tmpdir.path, 'package.json'); +const packageFilepath = path.join(tmpdir.path, "package.json"); const packageURL = pathToFileURL(packageFilepath); const packageBody = '{"main": "dep.js"}'; @@ -44,7 +44,7 @@ function test({ shouldFail, 
integrity }) { const resources = { [packageURL]: { body: packageBody, - integrity: `sha256-${hash('sha256', packageBody)}` + integrity: `sha256-${hash("sha256", packageBody)}` }, [depURL]: { body: depBody, @@ -52,17 +52,17 @@ function test({ shouldFail, integrity }) { } }; const manifest = { - resources: {}, + resources: {} }; for (const [url, { body, integrity }] of Object.entries(resources)) { manifest.resources[url] = { - integrity, + integrity }; fs.writeFileSync(new URL(url, tmpdirURL.href), body); } fs.writeFileSync(policyFilepath, JSON.stringify(manifest, null, 2)); const { status } = spawnSync(process.execPath, [ - '--experimental-policy', + "--experimental-policy", policyFilepath, depFilepath ]); @@ -75,20 +75,20 @@ function test({ shouldFail, integrity }) { test({ shouldFail: false, - integrity: `sha256-${hash('sha256', depBody)}`, + integrity: `sha256-${hash("sha256", depBody)}` }); test({ shouldFail: true, - integrity: `1sha256-${hash('sha256', depBody)}`, + integrity: `1sha256-${hash("sha256", depBody)}` }); test({ shouldFail: true, - integrity: 'hoge', + integrity: "hoge" }); test({ shouldFail: true, - integrity: `sha256-${hash('sha256', depBody)}sha256-${hash( - 'sha256', + integrity: `sha256-${hash("sha256", depBody)}sha256-${hash( + "sha256", depBody - )}`, + )}` }); diff --git a/test/parallel/test-process-versions.js b/test/parallel/test-process-versions.js index 14484293dc4621..fb5fe9c0c56d8e 100644 --- a/test/parallel/test-process-versions.js +++ b/test/parallel/test-process-versions.js @@ -1,19 +1,29 @@ -'use strict'; -const common = require('../common'); -const assert = require('assert'); - -const expected_keys = ['ares', 'brotli', 'modules', 'node', - 'uv', 'v8', 'zlib', 'nghttp2', 'napi', 'llhttp']; +"use strict"; +const common = require("../common"); +const assert = require("assert"); + +const expected_keys = [ + "ares", + "brotli", + "modules", + "node", + "uv", + "v8", + "zlib", + "nghttp2", + "napi", + "llhttp" +]; if (common.hasCrypto) 
{ - expected_keys.push('openssl'); + expected_keys.push("openssl"); } if (common.hasIntl) { - expected_keys.push('icu'); - expected_keys.push('cldr'); - expected_keys.push('tz'); - expected_keys.push('unicode'); + expected_keys.push("icu"); + expected_keys.push("cldr"); + expected_keys.push("tz"); + expected_keys.push("unicode"); } expected_keys.sort(); @@ -30,8 +40,11 @@ assert(commonTemplate.test(process.versions.node)); assert(commonTemplate.test(process.versions.uv)); assert(commonTemplate.test(process.versions.zlib)); -assert(/^\d+\.\d+\.\d+(?:\.\d+)?-node\.\d+(?: \(candidate\))?$/ - .test(process.versions.v8)); +assert( + /^\d+\.\d+\.\d+(?:\.\d+)?-node\.\d+(?: \(candidate\))?$/.test( + process.versions.v8 + ) +); assert(/^\d+$/.test(process.versions.modules)); if (common.hasCrypto) { @@ -44,5 +57,7 @@ for (let i = 0; i < expected_keys.length; i++) { assert.strictEqual(descriptor.writable, false); } -assert.strictEqual(process.config.variables.napi_build_version, - process.versions.napi); +assert.strictEqual( + process.config.variables.napi_build_version, + process.versions.napi +); diff --git a/test/parallel/test-stream-writable-write-writev-finish.js b/test/parallel/test-stream-writable-write-writev-finish.js index aa43b1490c8600..ac609db8c851bc 100644 --- a/test/parallel/test-stream-writable-write-writev-finish.js +++ b/test/parallel/test-stream-writable-write-writev-finish.js @@ -1,8 +1,8 @@ -'use strict'; +"use strict"; -const common = require('../common'); -const assert = require('assert'); -const stream = require('stream'); +const common = require("../common"); +const assert = require("assert"); +const stream = require("stream"); // Ensure consistency between the finish event when using cork() // and writev and when not using them @@ -11,56 +11,65 @@ const stream = require('stream'); const writable = new stream.Writable(); writable._write = (chunks, encoding, cb) => { - cb(new Error('write test error')); + cb(new Error("write test error")); }; - 
writable.on('finish', common.mustNotCall()); - writable.on('prefinish', common.mustNotCall()); - writable.on('error', common.mustCall((er) => { - assert.strictEqual(er.message, 'write test error'); - })); + writable.on("finish", common.mustNotCall()); + writable.on("prefinish", common.mustNotCall()); + writable.on( + "error", + common.mustCall(er => { + assert.strictEqual(er.message, "write test error"); + }) + ); - writable.end('test'); + writable.end("test"); } { const writable = new stream.Writable(); writable._write = (chunks, encoding, cb) => { - setImmediate(cb, new Error('write test error')); + setImmediate(cb, new Error("write test error")); }; - writable.on('finish', common.mustNotCall()); - writable.on('prefinish', common.mustNotCall()); - writable.on('error', common.mustCall((er) => { - assert.strictEqual(er.message, 'write test error'); - })); + writable.on("finish", common.mustNotCall()); + writable.on("prefinish", common.mustNotCall()); + writable.on( + "error", + common.mustCall(er => { + assert.strictEqual(er.message, "write test error"); + }) + ); - writable.end('test'); + writable.end("test"); } { const writable = new stream.Writable(); writable._write = (chunks, encoding, cb) => { - cb(new Error('write test error')); + cb(new Error("write test error")); }; writable._writev = (chunks, cb) => { - cb(new Error('writev test error')); + cb(new Error("writev test error")); }; - writable.on('finish', common.mustNotCall()); - writable.on('prefinish', common.mustNotCall()); - writable.on('error', common.mustCall((er) => { - assert.strictEqual(er.message, 'writev test error'); - })); + writable.on("finish", common.mustNotCall()); + writable.on("prefinish", common.mustNotCall()); + writable.on( + "error", + common.mustCall(er => { + assert.strictEqual(er.message, "writev test error"); + }) + ); writable.cork(); - writable.write('test'); + writable.write("test"); setImmediate(function() { - writable.end('test'); + writable.end("test"); }); } @@ -68,24 +77,27 
@@ const stream = require('stream'); const writable = new stream.Writable(); writable._write = (chunks, encoding, cb) => { - setImmediate(cb, new Error('write test error')); + setImmediate(cb, new Error("write test error")); }; writable._writev = (chunks, cb) => { - setImmediate(cb, new Error('writev test error')); + setImmediate(cb, new Error("writev test error")); }; - writable.on('finish', common.mustNotCall()); - writable.on('prefinish', common.mustNotCall()); - writable.on('error', common.mustCall((er) => { - assert.strictEqual(er.message, 'writev test error'); - })); + writable.on("finish", common.mustNotCall()); + writable.on("prefinish", common.mustNotCall()); + writable.on( + "error", + common.mustCall(er => { + assert.strictEqual(er.message, "writev test error"); + }) + ); writable.cork(); - writable.write('test'); + writable.write("test"); setImmediate(function() { - writable.end('test'); + writable.end("test"); }); } @@ -94,14 +106,14 @@ const stream = require('stream'); { const rs = new stream.Readable(); - rs.push('ok'); + rs.push("ok"); rs.push(null); rs._read = () => {}; const ws = new stream.Writable(); - ws.on('finish', common.mustNotCall()); - ws.on('error', common.mustCall()); + ws.on("finish", common.mustNotCall()); + ws.on("error", common.mustCall()); ws._write = (chunk, encoding, done) => { setImmediate(done, new Error()); @@ -111,14 +123,14 @@ const stream = require('stream'); { const rs = new stream.Readable(); - rs.push('ok'); + rs.push("ok"); rs.push(null); rs._read = () => {}; const ws = new stream.Writable(); - ws.on('finish', common.mustNotCall()); - ws.on('error', common.mustCall()); + ws.on("finish", common.mustNotCall()); + ws.on("error", common.mustCall()); ws._write = (chunk, encoding, done) => { done(new Error()); @@ -131,8 +143,8 @@ const stream = require('stream'); w._write = (chunk, encoding, cb) => { process.nextTick(cb); }; - w.on('error', common.mustCall()); - w.on('prefinish', () => { + w.on("error", common.mustCall()); + 
w.on("prefinish", () => { w.write("shouldn't write in prefinish listener"); }); w.end(); @@ -143,8 +155,8 @@ const stream = require('stream'); w._write = (chunk, encoding, cb) => { process.nextTick(cb); }; - w.on('error', common.mustCall()); - w.on('finish', () => { + w.on("error", common.mustCall()); + w.on("finish", () => { w.write("shouldn't write in finish listener"); }); w.end(); diff --git a/test/sequential/test-http-max-http-headers.js b/test/sequential/test-http-max-http-headers.js index 9ee4d8c352928b..35c2bb375950ab 100644 --- a/test/sequential/test-http-max-http-headers.js +++ b/test/sequential/test-http-max-http-headers.js @@ -1,15 +1,15 @@ // Flags: --expose-internals -'use strict'; -const common = require('../common'); -const assert = require('assert'); -const http = require('http'); -const net = require('net'); +"use strict"; +const common = require("../common"); +const assert = require("assert"); +const http = require("http"); +const net = require("net"); const MAX = +(process.argv[2] || 8 * 1024); // Command line option, or 8KB. -const { getOptionValue } = require('internal/options'); +const { getOptionValue } = require("internal/options"); -console.log('pid is', process.pid); -console.log('max header size is', getOptionValue('--max-http-header-size')); +console.log("pid is", process.pid); +console.log("max header size is", getOptionValue("--max-http-header-size")); // Verify that we cannot receive more than 8KB of headers. @@ -24,7 +24,7 @@ function once(cb) { } function finished(client, callback) { - 'abort error end'.split(' ').forEach((e) => { + "abort error end".split(" ").forEach(e => { client.on(e, once(() => setImmediate(callback))); }); } @@ -33,14 +33,14 @@ function fillHeaders(headers, currentSize, valid = false) { // `llhttp` counts actual header name/value sizes, excluding the whitespace // and stripped chars. // OK, Content-Length, 0, X-CRASH, aaa... 
- headers += 'a'.repeat(MAX - currentSize); + headers += "a".repeat(MAX - currentSize); // Generate valid headers if (valid) { // TODO(mcollina): understand why -32 is needed instead of -1 headers = headers.slice(0, -32); } - return headers + '\r\n\r\n'; + return headers + "\r\n\r\n"; } function writeHeaders(socket, headers) { @@ -55,21 +55,23 @@ function writeHeaders(socket, headers) { } // Safety check we are chunking correctly - assert.strictEqual(array.join(''), headers); + assert.strictEqual(array.join(""), headers); next(); function next() { if (socket.destroyed) { - console.log('socket was destroyed early, data left to write:', - array.join('').length); + console.log( + "socket was destroyed early, data left to write:", + array.join("").length + ); return; } const chunk = array.shift(); if (chunk) { - console.log('writing chunk of size', chunk.length); + console.log("writing chunk of size", chunk.length); socket.write(chunk, next); } else { socket.end(); @@ -78,44 +80,47 @@ function writeHeaders(socket, headers) { } function test1() { - console.log('test1'); - let headers = - 'HTTP/1.1 200 OK\r\n' + - 'Content-Length: 0\r\n' + - 'X-CRASH: '; + console.log("test1"); + let headers = "HTTP/1.1 200 OK\r\n" + "Content-Length: 0\r\n" + "X-CRASH: "; // OK, Content-Length, 0, X-CRASH, aaa... 
const currentSize = 2 + 14 + 1 + 7; headers = fillHeaders(headers, currentSize); - const server = net.createServer((sock) => { - sock.once('data', (chunk) => { + const server = net.createServer(sock => { + sock.once("data", chunk => { writeHeaders(sock, headers); sock.resume(); }); // The socket might error but that's ok - sock.on('error', () => {}); + sock.on("error", () => {}); }); - server.listen(0, common.mustCall(() => { - const port = server.address().port; - const client = http.get({ port: port }, common.mustNotCall()); - - client.on('error', common.mustCall((err) => { - assert.strictEqual(err.code, 'HPE_HEADER_OVERFLOW'); - server.close(test2); - })); - })); + server.listen( + 0, + common.mustCall(() => { + const port = server.address().port; + const client = http.get({ port: port }, common.mustNotCall()); + + client.on( + "error", + common.mustCall(err => { + assert.strictEqual(err.code, "HPE_HEADER_OVERFLOW"); + server.close(test2); + }) + ); + }) + ); } const test2 = common.mustCall(() => { - console.log('test2'); + console.log("test2"); let headers = - 'GET / HTTP/1.1\r\n' + - 'Host: localhost\r\n' + - 'Agent: nod2\r\n' + - 'X-CRASH: '; + "GET / HTTP/1.1\r\n" + + "Host: localhost\r\n" + + "Agent: nod2\r\n" + + "X-CRASH: "; // /, Host, localhost, Agent, node, X-CRASH, a... 
const currentSize = 1 + 4 + 9 + 5 + 4 + 7; @@ -123,59 +128,73 @@ const test2 = common.mustCall(() => { const server = http.createServer(common.mustNotCall()); - server.once('clientError', common.mustCall((err) => { - assert.strictEqual(err.code, 'HPE_HEADER_OVERFLOW'); - })); - - server.listen(0, common.mustCall(() => { - const client = net.connect(server.address().port); - client.on('connect', () => { - writeHeaders(client, headers); - client.resume(); - }); - - finished(client, common.mustCall((err) => { - server.close(test3); - })); - })); + server.once( + "clientError", + common.mustCall(err => { + assert.strictEqual(err.code, "HPE_HEADER_OVERFLOW"); + }) + ); + + server.listen( + 0, + common.mustCall(() => { + const client = net.connect(server.address().port); + client.on("connect", () => { + writeHeaders(client, headers); + client.resume(); + }); + + finished( + client, + common.mustCall(err => { + server.close(test3); + }) + ); + }) + ); }); const test3 = common.mustCall(() => { - console.log('test3'); + console.log("test3"); let headers = - 'GET / HTTP/1.1\r\n' + - 'Host: localhost\r\n' + - 'Agent: nod3\r\n' + - 'X-CRASH: '; + "GET / HTTP/1.1\r\n" + + "Host: localhost\r\n" + + "Agent: nod3\r\n" + + "X-CRASH: "; // /, Host, localhost, Agent, node, X-CRASH, a... 
const currentSize = 1 + 4 + 9 + 5 + 4 + 7; headers = fillHeaders(headers, currentSize, true); - console.log('writing', headers.length); + console.log("writing", headers.length); - const server = http.createServer(common.mustCall((req, res) => { - res.end('hello from test3 server'); - server.close(); - })); + const server = http.createServer( + common.mustCall((req, res) => { + res.end("hello from test3 server"); + server.close(); + }) + ); - server.on('clientError', (err) => { + server.on("clientError", err => { console.log(err.code); - if (err.code === 'HPE_HEADER_OVERFLOW') { - console.log(err.rawPacket.toString('hex')); + if (err.code === "HPE_HEADER_OVERFLOW") { + console.log(err.rawPacket.toString("hex")); } }); - server.on('clientError', common.mustNotCall()); - - server.listen(0, common.mustCall(() => { - const client = net.connect(server.address().port); - client.on('connect', () => { - writeHeaders(client, headers); - client.resume(); - }); - - client.pipe(process.stdout); - })); + server.on("clientError", common.mustNotCall()); + + server.listen( + 0, + common.mustCall(() => { + const client = net.connect(server.address().port); + client.on("connect", () => { + writeHeaders(client, headers); + client.resume(); + }); + + client.pipe(process.stdout); + }) + ); }); test1(); diff --git a/test/sequential/test-set-http-max-http-headers.js b/test/sequential/test-set-http-max-http-headers.js index cfe1ed69537743..59b4ae8fb6a719 100644 --- a/test/sequential/test-set-http-max-http-headers.js +++ b/test/sequential/test-set-http-max-http-headers.js @@ -1,10 +1,10 @@ -'use strict'; +"use strict"; -const common = require('../common'); -const assert = require('assert'); -const { spawn } = require('child_process'); -const path = require('path'); -const testName = path.join(__dirname, 'test-http-max-http-headers.js'); +const common = require("../common"); +const assert = require("assert"); +const { spawn } = require("child_process"); +const path = require("path"); +const 
testName = path.join(__dirname, "test-http-max-http-headers.js"); const timeout = common.platformTimeout(100); @@ -15,77 +15,89 @@ function test(fn) { } test(function(cb) { - console.log('running subtest expecting failure'); + console.log("running subtest expecting failure"); // Validate that the test fails if the max header size is too small. - const args = ['--expose-internals', - '--max-http-header-size=1024', - testName]; - const cp = spawn(process.execPath, args, { stdio: 'inherit' }); - - cp.on('close', common.mustCall((code, signal) => { - assert.strictEqual(code, 1); - assert.strictEqual(signal, null); - cb(); - })); + const args = ["--expose-internals", "--max-http-header-size=1024", testName]; + const cp = spawn(process.execPath, args, { stdio: "inherit" }); + + cp.on( + "close", + common.mustCall((code, signal) => { + assert.strictEqual(code, 1); + assert.strictEqual(signal, null); + cb(); + }) + ); }); test(function(cb) { - console.log('running subtest expecting success'); + console.log("running subtest expecting success"); const env = Object.assign({}, process.env, { - NODE_DEBUG: 'http' + NODE_DEBUG: "http" }); // Validate that the test fails if the max header size is too small. // Validate that the test now passes if the same limit becomes large enough. - const args = ['--expose-internals', - '--max-http-header-size=1024', - testName, - '1024']; + const args = [ + "--expose-internals", + "--max-http-header-size=1024", + testName, + "1024" + ]; const cp = spawn(process.execPath, args, { env, - stdio: 'inherit' + stdio: "inherit" }); - cp.on('close', common.mustCall((code, signal) => { - assert.strictEqual(code, 0); - assert.strictEqual(signal, null); - cb(); - })); + cp.on( + "close", + common.mustCall((code, signal) => { + assert.strictEqual(code, 0); + assert.strictEqual(signal, null); + cb(); + }) + ); }); // Next, repeat the same checks using NODE_OPTIONS if it is supported. 
if (!process.config.variables.node_without_node_options) { const env = Object.assign({}, process.env, { - NODE_OPTIONS: '--max-http-header-size=1024' + NODE_OPTIONS: "--max-http-header-size=1024" }); test(function(cb) { - console.log('running subtest expecting failure'); + console.log("running subtest expecting failure"); // Validate that the test fails if the max header size is too small. - const args = ['--expose-internals', testName]; - const cp = spawn(process.execPath, args, { env, stdio: 'inherit' }); - - cp.on('close', common.mustCall((code, signal) => { - assert.strictEqual(code, 1); - assert.strictEqual(signal, null); - cb(); - })); + const args = ["--expose-internals", testName]; + const cp = spawn(process.execPath, args, { env, stdio: "inherit" }); + + cp.on( + "close", + common.mustCall((code, signal) => { + assert.strictEqual(code, 1); + assert.strictEqual(signal, null); + cb(); + }) + ); }); test(function(cb) { // Validate that the test now passes if the same limit // becomes large enough. - const args = ['--expose-internals', testName, '1024']; - const cp = spawn(process.execPath, args, { env, stdio: 'inherit' }); - - cp.on('close', common.mustCall((code, signal) => { - assert.strictEqual(code, 0); - assert.strictEqual(signal, null); - cb(); - })); + const args = ["--expose-internals", testName, "1024"]; + const cp = spawn(process.execPath, args, { env, stdio: "inherit" }); + + cp.on( + "close", + common.mustCall((code, signal) => { + assert.strictEqual(code, 0); + assert.strictEqual(signal, null); + cb(); + }) + ); }); } diff --git a/tools/license-builder.sh b/tools/license-builder.sh index 60f38e5cebf694..eb20a79729138a 100755 --- a/tools/license-builder.sh +++ b/tools/license-builder.sh @@ -3,109 +3,106 @@ set -e rootdir="$(CDPATH= cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" -licensefile="${rootdir}/LICENSE" -licensehead="$(sed '/^- /,$d' ${licensefile})" -tmplicense="${rootdir}/~LICENSE.$$" -echo -e "$licensehead" > $tmplicense - +licensefile="$rootdir/LICENSE" +licensehead="$(sed '/^- /,$d' "$licensefile")" +tmplicense="$rootdir/~LICENSE.$$" +echo -e "$licensehead" >"$tmplicense" # addlicense -function addlicense { +function addlicense() { echo " - ${1}, located at ${2}, is licensed as follows: \"\"\" $(echo -e "$3" | sed -e 's/^/ /' -e 's/^ $//' -e 's/ *$//' | sed -e '/./,$!d' | sed -e '/^$/N;/^\n$/D') \"\"\"\ -" >> $tmplicense +" >>"$tmplicense" } - -if ! [ -d "${rootdir}/deps/icu/" ] && ! [ -d "${rootdir}/deps/icu-small/" ]; then +if ! [ -d "$rootdir/deps/icu/" ] && ! [ -d "$rootdir/deps/icu-small/" ]; then echo "ICU not installed, run configure to download it, e.g. ./configure --with-intl=small-icu --download=icu" exit 1 fi - # Dependencies bundled in distributions -addlicense "Acorn" "deps/acorn" "$(cat ${rootdir}/deps/acorn/acorn/LICENSE)" -addlicense "Acorn plugins" "deps/acorn-plugins" "$(cat ${rootdir}/deps/acorn-plugins/acorn-class-fields/LICENSE)" -addlicense "c-ares" "deps/cares" "$(tail -n +3 ${rootdir}/deps/cares/LICENSE.md)" -if [ -f "${rootdir}/deps/icu/LICENSE" ]; then +addlicense "Acorn" "deps/acorn" "$(cat "$rootdir"/deps/acorn/acorn/LICENSE)" +addlicense "Acorn plugins" "deps/acorn-plugins" "$(cat "$rootdir"/deps/acorn-plugins/acorn-class-fields/LICENSE)" +addlicense "c-ares" "deps/cares" "$(tail -n +3 "$rootdir"/deps/cares/LICENSE.md)" +if [ -f "$rootdir/deps/icu/LICENSE" ]; then # ICU 57 and following. Drop the BOM addlicense "ICU" "deps/icu" \ - "$(sed -e '1s/^[^a-zA-Z ]*ICU/ICU/' -e :a \ - -e 's/<[^>]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/]*>//g;s/ / /g;s/ +$//;/